
FFmpeg audio/video muxing

曾大稳 · 2018-09-11

Principle: take the video stream from the video file and the audio stream from the audio file, then write them packet by packet into a new file, interleaved by timestamp (a pure remux; nothing is re-encoded).

Result: the audio file and the video file are merged into a single file whose duration is the shorter of the two inputs (the same behavior as the ffmpeg CLI's -shortest option when stream-copying).
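Before the full listing, a minimal standalone sketch of the scheduling primitive the muxing loop relies on: av_compare_ts() compares two timestamps expressed in different time bases, which is how the loop decides whether the next packet should come from the video or the audio input. The timestamps and time bases below are made-up values for illustration:

```cpp
extern "C" {
#include "libavutil/mathematics.h"
}
#include <stdio.h>

int main() {
    // Video pts 3000 in a 1/90000 time base -> 3000 / 90000 ≈ 0.0333 s
    // Audio pts 1024 in a 1/44100 time base -> 1024 / 44100 ≈ 0.0232 s
    AVRational tb_v = {1, 90000};
    AVRational tb_a = {1, 44100};
    int cmp = av_compare_ts(3000, tb_v, 1024, tb_a);
    // cmp < 0: the video timestamp is earlier; cmp > 0: the audio one is earlier
    printf("av_compare_ts -> %d\n", cmp);  // prints 1: the audio packet is due first
    return 0;
}
```

With that primitive in mind, the full source follows; see the inline comments for details.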

```cpp


#include <jni.h>
#include <android/log.h>

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
};


#define  LOG_TAG    "JNI_TAG"
#define  LOGD(...)  __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)


extern "C"
JNIEXPORT void JNICALL
Java_com_zzw_ffmpegdemo_FFmpegHelper_megre(JNIEnv *env, jobject instance, jstring musicPath_,
                                           jstring videoPath_,jstring outPath_) {


    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL,*ofmt_ctx = NULL;
    int ret, i;
    int videoindex_v=-1,videoindex_out=-1;
    int audioindex_a=-1,audioindex_out=-1;
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
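    // cur_pts_v / cur_pts_a remember the pts of the last packet taken from each
    // input, in that input's own time base; av_compare_ts() below uses them to
    // decide which stream to pull from next.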


    const char *musicPath = env->GetStringUTFChars(musicPath_, 0);
    const char *videoPath = env->GetStringUTFChars(videoPath_, 0);
    const char *outPath = env->GetStringUTFChars(outPath_, 0);

    // Register all muxers/demuxers. Required before FFmpeg 4.0; deprecated (and a no-op) in 4.0+.
    av_register_all();
    //--------------------------------input init start---------------------------------------------
    if ((ret = avformat_open_input(&ifmt_ctx_v, videoPath, 0, 0)) < 0) { // open the input video file
        LOGD("Could not open input video file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) { // read video stream info
        LOGD("Failed to retrieve input video stream information");
        goto end;
    }
    if ((ret = avformat_open_input(&ifmt_ctx_a, musicPath, 0, 0)) < 0) { // open the input audio file
        LOGD("Could not open input audio file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) { // read audio stream info
        LOGD("Failed to retrieve input audio stream information");
        goto end;
    }
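    // Both inputs are now parsed; avformat_find_stream_info() has filled in each
    // AVStream's codecpar, which the stream-copy setup below reads from.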

//    LOGD("===========Input Information==========\n");
//    av_dump_format(ifmt_ctx_v, 0, videoPath, 0);
//    av_dump_format(ifmt_ctx_a, 0, musicPath, 0);
//    LOGD("======================================\n");

    //--------------------------------input init end---------------------------------------------

    //--------------------------------out init start---------------------------------------------
    // Allocate the output AVFormatContext; the muxer is guessed from outPath's extension.
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, outPath);
    if (!ofmt_ctx) {
        LOGD("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    //--------------------------------out init end-----------------------------------------------


    //-------------------------------- stream setup --------------------------------------------
    // Create a video output stream mirroring the input video stream.
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        if(ifmt_ctx_v->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
            AVStream* in_stream = ifmt_ctx_v->streams[i];
            AVCodec *dec = avcodec_find_decoder(in_stream->codecpar->codec_id);
            if(!dec){
                LOGD( "Could not find decoder\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            AVStream* out_stream = avformat_new_stream(ofmt_ctx,dec);
            videoindex_v =i;
            if(!out_stream){
                LOGD( "Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out=out_stream->index;

            // Stage the input codec parameters in a temporary AVCodecContext so the
            // container-specific bits (codec_tag, global-header flag) can be adjusted
            // before copying them onto the output stream.
            AVCodecContext *avCodecContext = avcodec_alloc_context3(dec);
            if ((ret = avcodec_parameters_to_context(avCodecContext, in_stream->codecpar)) < 0) {
                avcodec_free_context(&avCodecContext);
                LOGD("Could not fill the decoder context");
                goto end;
            }
            avCodecContext->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
                // Some containers (e.g. MP4) carry codec extradata in a global header.
                avCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, avCodecContext);
            avcodec_free_context(&avCodecContext);  // staging context is no longer needed
            if (ret < 0) {
                LOGD("Failed to copy codec parameters to the output video stream\n");
                goto end;
            }
            break;
        }
    }
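    // The audio loop below mirrors the video one, selecting the first audio
    // stream instead.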
    // Create an audio output stream mirroring the input audio stream.
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        if(ifmt_ctx_a->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_AUDIO){
            AVStream* in_stream = ifmt_ctx_a->streams[i];
            AVCodec *dec = avcodec_find_decoder(in_stream->codecpar->codec_id);
            if(!dec){
                LOGD( "Could not find decoder\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            AVStream* out_stream = avformat_new_stream(ofmt_ctx,dec);
            audioindex_a =i;
            if(!out_stream){
                LOGD( "Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out=out_stream->index;

            // Same staging as the video branch above.
            AVCodecContext *avCodecContext = avcodec_alloc_context3(dec);
            if ((ret = avcodec_parameters_to_context(avCodecContext, in_stream->codecpar)) < 0) {
                avcodec_free_context(&avCodecContext);
                LOGD("Could not fill the decoder context");
                goto end;
            }
            avCodecContext->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
                avCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, avCodecContext);
            avcodec_free_context(&avCodecContext);  // staging context is no longer needed
            if (ret < 0) {
                LOGD("Failed to copy codec parameters to the output audio stream\n");
                goto end;
            }
            break;
        }
    }

//    LOGD("==========Output Information==========\n");
//    av_dump_format(ofmt_ctx, 0, outPath, 1);
//    LOGD("======================================\n");


//    ------------------------------- mux the output file --------------------------------------

    // Open output file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, outPath, AVIO_FLAG_WRITE);
        if (ret < 0) {
            LOGD("Could not open output file %s ", outPath);
            goto end;
        }
    }
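    // (Formats flagged AVFMT_NOFILE manage their own I/O, which is why avio_open()
    // is skipped for them.)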

    // Write file header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        LOGD("Error occurred when opening output file\n");
        goto end;
    }
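
    // Main remux loop: each pass compares the latest video and audio timestamps
    // and pulls the next packet from whichever input is running behind, keeping
    // the output interleaved in presentation order. The loop stops as soon as
    // either input runs out, which is why the output lasts only as long as the
    // shorter input.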


    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index=0;
        AVStream *in_stream, *out_stream;
        AVPacket *pkt = av_packet_alloc();

        // Decide which input is due: av_compare_ts() compares the two timestamps
        // across their different time bases (video wins ties).
        if (av_compare_ts(cur_pts_v,
                          ifmt_ctx_v->streams[videoindex_v]->time_base,
                          cur_pts_a,
                          ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            // video is due next
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;

            int got_pkt = 0;  // set once a packet from the wanted stream is read
            while (av_read_frame(ifmt_ctx, pkt) >= 0) {
                if (pkt->stream_index == videoindex_v) {
                    in_stream  = ifmt_ctx->streams[pkt->stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];
                    // FIX: no PTS (e.g. a raw H.264 stream carries none), so
                    // synthesize timestamps from the frame rate.
                    if (pkt->pts == AV_NOPTS_VALUE) {
                        AVRational time_base1 = in_stream->time_base;
                        // Duration between two frames, in AV_TIME_BASE (microsecond) units
                        int64_t calc_duration = (double) AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                        // Convert frame_index * calc_duration from microseconds into
                        // the input stream's own time base
                        pkt->pts = (double) (frame_index * calc_duration) / (double) (av_q2d(time_base1) * AV_TIME_BASE);
                        pkt->dts = pkt->pts;
                        pkt->duration = (double) calc_duration / (double) (av_q2d(time_base1) * AV_TIME_BASE);
                        frame_index++;
                    }
                    cur_pts_v = pkt->pts;
                    got_pkt = 1;
                    break;
                }
                av_packet_unref(pkt);  // drop packets from other streams
            }
            if (!got_pkt) {  // video input exhausted: stop muxing
                av_packet_free(&pkt);  // also sets pkt to NULL
                break;
            }
        } else {
            // audio is due next
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            int got_pkt = 0;
            while (av_read_frame(ifmt_ctx, pkt) >= 0) {
                if (pkt->stream_index == audioindex_a) {
                    in_stream  = ifmt_ctx->streams[pkt->stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];
                    // FIX: no PTS; same handling as the video branch
                    // (kept from the original: r_frame_rate is meaningful mainly for video)
                    if (pkt->pts == AV_NOPTS_VALUE) {
                        AVRational time_base1 = in_stream->time_base;
                        int64_t calc_duration = (double) AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                        pkt->pts = (double) (frame_index * calc_duration) / (double) (av_q2d(time_base1) * AV_TIME_BASE);
                        pkt->dts = pkt->pts;
                        pkt->duration = (double) calc_duration / (double) (av_q2d(time_base1) * AV_TIME_BASE);
                        frame_index++;
                    }
                    cur_pts_a = pkt->pts;
                    got_pkt = 1;
                    break;
                }
                av_packet_unref(pkt);  // drop packets from other streams
            }
            if (!got_pkt) {  // audio input exhausted: stop muxing
                av_packet_free(&pkt);
                break;
            }
        }

        // Rescale PTS/DTS/duration from the input stream's time base to the
        // output stream's time base.
        pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
        pkt->pos = -1;                     // byte position: unknown after remuxing
        pkt->stream_index = stream_index;  // retarget the packet at the output stream

        LOGD("Write 1 Packet. size:%5d\tpts:%lld\n",pkt->size,pkt->pts);
        //Write AVPacket 音频或视频裸流
        if (av_interleaved_write_frame(ofmt_ctx, pkt) < 0) {
            LOGD( "Error muxing packet\n");
            av_packet_free(&pkt);
            av_free(pkt);
            break;
        }
        av_packet_free(&pkt);
        av_free(pkt);
    }
    // Flush anything still buffered by av_interleaved_write_frame() and write the
    // container trailer (for MP4 this finalizes the moov index).
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    env->ReleaseStringUTFChars(musicPath_, musicPath);
    env->ReleaseStringUTFChars(videoPath_, videoPath);
    env->ReleaseStringUTFChars(outPath_, outPath);
}
```
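Two small companion sketches, assuming the same FFmpeg/NDK setup as the listing; both helper names (log_av_error, log_output_duration) are hypothetical. First, the listing logs only fixed strings; av_strerror() renders FFmpeg's negative error codes as readable text, which makes the goto end paths much easier to debug:

```cpp
#include <android/log.h>
extern "C" {
#include "libavutil/error.h"
}

// Hypothetical helper: log a message plus the readable form of an FFmpeg
// error code, e.g. log_av_error("avformat_open_input failed", ret);
static void log_av_error(const char *msg, int err) {
    char buf[AV_ERROR_MAX_STRING_SIZE] = {0};
    av_strerror(err, buf, sizeof(buf));
    __android_log_print(ANDROID_LOG_ERROR, "JNI_TAG", "%s: %s (%d)", msg, buf, err);
}
```

Second, a quick sanity check of the result: reopen the muxed file and log its container duration, which should come out close to the shorter input's duration:

```cpp
extern "C" {
#include "libavformat/avformat.h"
}
#include <android/log.h>

// Hypothetical check: open `path` and log the container duration in seconds.
static void log_output_duration(const char *path) {
    AVFormatContext *ctx = NULL;
    if (avformat_open_input(&ctx, path, NULL, NULL) < 0) return;
    if (avformat_find_stream_info(ctx, NULL) >= 0 && ctx->duration != AV_NOPTS_VALUE) {
        // AVFormatContext.duration is in AV_TIME_BASE (microsecond) units.
        __android_log_print(ANDROID_LOG_ERROR, "JNI_TAG", "muxed duration: %.2f s",
                            ctx->duration / (double) AV_TIME_BASE);
    }
    avformat_close_input(&ctx);
}
```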