Looking for example on using DXVA2 HEVC video decoding

Examples and samples for those who develop software with FFmpeg.

Post by linuxfedora » Tue May 17, 2016 8:56 am

Hi All,

Is there a complete example of using DXVA2 to decode HEVC video?
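
I found av_hwdevice_ctx_create() and AVCodecContext::hw_device_ctx in newer FFmpeg headers, so I think the setup is roughly the sketch below. It is untested, the names SetupDxva2 and GetHwFormat are just my own, and on older builds DXVA2 apparently needs the much longer manual setup that ffmpeg_dxva2.c does, which is why I am hoping for a complete example.

Code:


extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
}

// Untested sketch: pick the DXVA2 surface format when the decoder offers it,
// otherwise fall back to the first (software) format in the list.
static AVPixelFormat GetHwFormat(AVCodecContext* ctx, const AVPixelFormat* fmts)
{
   for (const AVPixelFormat* p = fmts; *p != AV_PIX_FMT_NONE; p++)
   {
      if (*p == AV_PIX_FMT_DXVA2_VLD)
         return *p;
   }
   return fmts[0];
}

// Untested sketch: create a DXVA2 device and attach it to the codec context
// before avcodec_open2(). NULL device string = default adapter.
static int SetupDxva2(AVCodecContext* codecCtx, AVBufferRef** hwDeviceCtx)
{
   int err = av_hwdevice_ctx_create(hwDeviceCtx, AV_HWDEVICE_TYPE_DXVA2,
                                    NULL, NULL, 0);
   if (err < 0)
      return err;

   codecCtx->hw_device_ctx = av_buffer_ref(*hwDeviceCtx);  // decoder keeps its own reference
   codecCtx->get_format = GetHwFormat;
   return 0;
}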

This is my current working code for software decoding (no hardware acceleration yet):

Code:


#pragma once

#define __STDC_CONSTANT_MACROS

#include <windows.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
}

//I/O buffer size handed to the custom AVIOContext read callback.
#define BUFFER_SIZE      (1024 * FF_INPUT_BUFFER_PADDING_SIZE * 2)

#define kCodecMJPEG      1
#define kCodecH264      2
#define kCodecMPEG4      3
#define kCodecH265      4


class DecoderFFMPEG
{
private:
   CRITICAL_SECTION   m_cs;

   //AVFormatContext*   m_pFormatCtx;
   AVFrame*         m_pFrame;   
   AVCodecContext*      m_pCodecCtx;
   AVCodec*         m_pCodec;
   AVInputFormat*      m_pAVInputFormat;
   AVFormatContext*   m_pAVFormatCtx;

   int               m_bufSize;
   uint8_t*         m_buffer;   
   AVIOContext*      m_pAVIOCtx;

   bool            m_probed;   
   AVPacket         m_packet;

   BYTE*            m_inData;
   int               m_inDataReadIndex;
   int               m_inDataSize;
   int               m_count;

   int               m_oldSize;
   uint8_t*         m_oldData;

   int               m_currentCodec;
   int               m_currentWidth;
   int               m_currentHeight;
   int               mIsFrameReady;

   int mFileNum;

   bool isInitedFrameRGB;


public:
   DecoderFFMPEG(void);
   ~DecoderFFMPEG(void);

   int setupDecoder( int codec, int width, int height);
   int startDecode(BYTE* encBuf, int encBufLen);
   int getImage(BYTE* outImgBuf, int putImgBufLen);
   int getVideoRawData(BYTE* outRawBuf, int outRawBufLen);
    int SetDataBuffer(BYTE* data, int length);
   static int ConvertImgFormat(BYTE* inImgBuf, BYTE* outImgBuf, int srcImgFormat, int destImgFormat, int rawW, int rawH, int destW, int destH);
private:
   int InitDecoder(int codec);
   
   int InternalReadMethod(uint8_t *buf, int bufSize);
   static int ReadData(void *opaque, uint8_t *buf, int bufSize);

   void Clear();
};

Code:


#include "stdafx.h"
#include "DecoderFFMPEG.h"
#include "H264HeaderParser.h"

DecoderFFMPEG::DecoderFFMPEG(void)
{
   InitializeCriticalSection(&m_cs);

   av_register_all();

   m_currentCodec = 0;   
   m_currentWidth = 0;
   m_currentHeight = 0;
   m_pCodecCtx = 0;
   m_pCodec = 0;
   m_pFrame = 0;
   m_probed = false;
   m_buffer = 0;
   m_count = 0;
   m_oldSize = 0;

   m_inDataReadIndex = 0;
   m_inData = 0;
   m_bufSize = BUFFER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;

   mFileNum =0;
   mIsFrameReady = 0;
   m_inDataSize = 0;

   isInitedFrameRGB = false;
}

DecoderFFMPEG::~DecoderFFMPEG(void)
{
   //avcodec_close(pCodecCtx);
   
   Clear();   
   DeleteCriticalSection(&m_cs);
}



int DecoderFFMPEG::InternalReadMethod(uint8_t *buf, int bufSize)
{
   int readDataSize = 0;

   if(m_inDataSize > 0 && bufSize > 0)
   {
      if(bufSize >= m_inDataSize)
      {
         readDataSize = m_inDataSize;               
      }
      else
      {
         readDataSize = bufSize;         
      }

      memcpy(buf, m_inData + m_inDataReadIndex, readDataSize);

      //fwrite(m_inData + m_inDataReadIndex, 1, readDataSize, m_testFile);

      m_inDataReadIndex += readDataSize;
      m_inDataSize -= readDataSize;      
   }
   
   return readDataSize;

}

int DecoderFFMPEG::ReadData(void *opaque, uint8_t *buf, int bufSize)
{
   return ((DecoderFFMPEG*)opaque)->InternalReadMethod(buf, bufSize);
}

int DecoderFFMPEG::InitDecoder(int codec)
{
   EnterCriticalSection(&m_cs);
   if(!m_probed)
   {
      m_pFrame = av_frame_alloc();      

      m_buffer = (uint8_t*)av_mallocz(m_bufSize * sizeof(uint8_t));

      // write_flag = 0: the context is only read from
      m_pAVIOCtx = avio_alloc_context(m_buffer, BUFFER_SIZE, 0, this, ReadData, NULL, NULL);
   
      if(codec == kCodecMJPEG)
      {
         m_pAVInputFormat = av_find_input_format("mjpeg");
      }
      else if(codec == kCodecMPEG4)
      {
         m_pAVInputFormat = av_find_input_format("m4v");
      }
      else if(codec == kCodecH264)
      {
         m_pAVInputFormat = av_find_input_format("h264");
      }
      else if(codec == kCodecH265)
      {
         m_pAVInputFormat = av_find_input_format("hevc");
      }

      m_pAVFormatCtx = avformat_alloc_context();

      if(m_pAVInputFormat != 0)
      {
         m_pAVInputFormat->flags |= AVFMT_NOFILE;
      }
      else
      {
         LeaveCriticalSection(&m_cs);
         return -7; // Demuxer not found
      }

      m_pAVFormatCtx->pb = m_pAVIOCtx;

      if (avformat_open_input(&m_pAVFormatCtx, "", m_pAVInputFormat, NULL) < 0) 
      {
         LeaveCriticalSection(&m_cs);
         return -1;
      }

      // Find the first video stream
      int videoStream=-1;
       int i = 0;
      for(i=0; i<(int)m_pAVFormatCtx->nb_streams; i++)
      {
         if(m_pAVFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
         {
            videoStream=i;
            break;
         }
      }
      if(videoStream == -1)
      {
         LeaveCriticalSection(&m_cs);
         return -3; // Didn't find a video stream
      }

      // Get a pointer to the codec context for the video stream
      AVCodecContext* pStreamCodecCtx = m_pAVFormatCtx->streams[videoStream]->codec;

      // Find the decoder for the video stream
      m_pCodec = avcodec_find_decoder(pStreamCodecCtx->codec_id);

      if(m_pCodec == NULL)
      {
         LeaveCriticalSection(&m_cs);
         return -4; // Codec not found
      }

      // Allocate our own codec context and copy the stream parameters
      // (extradata, dimensions, pixel format) into it; a freshly allocated
      // context would otherwise start out empty.
      m_pCodecCtx = avcodec_alloc_context3(m_pCodec);
      if(avcodec_copy_context(m_pCodecCtx, pStreamCodecCtx) < 0)
      {
         LeaveCriticalSection(&m_cs);
         return -6; // Could not copy codec parameters
      }

      // Inform the codec that we can handle truncated bitstreams -- i.e.,
      // bitstreams where frame boundaries can fall in the middle of packets
      if(m_pCodec->capabilities & CODEC_CAP_TRUNCATED)
      {
         m_pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
      }

      // Open codec
      if(avcodec_open2(m_pCodecCtx, m_pCodec, NULL) < 0)
      {
         LeaveCriticalSection(&m_cs);
         return -5; // Could not open codec
      }

      m_probed = true;
   }
    LeaveCriticalSection(&m_cs);
   return 1;
}

int DecoderFFMPEG::SetDataBuffer(BYTE* data, int length)
{   
   m_inDataReadIndex = 0;
   m_inData = data;
   m_inDataSize = length;
  
   return 1;

}

int DecoderFFMPEG::setupDecoder(int codec, int width, int height)
{
   int result = 1;
   if(m_currentCodec != codec || m_currentWidth != width || m_currentHeight != height)
   {
      Clear();
      m_currentCodec = codec;
      m_currentWidth = width;
      m_currentHeight = height;

      result = InitDecoder(codec);

      if(result <= 0)
      {
         m_currentCodec = -1;
      }
   }
   return result;
}

int DecoderFFMPEG::startDecode(BYTE*inDataBuf, int dataLen)
{
   //printf("Decode %d %d... ", dataLen, inDataBuf);
   if (!inDataBuf)
   {
      return 0;
   }

   SetDataBuffer(inDataBuf,dataLen);

   EnterCriticalSection(&m_cs);
   int bytesDecodedReturn = -1;
   if(m_probed)
   {
      int bytesDecoded = 0;

      while(av_read_frame(m_pAVFormatCtx, &m_packet) >= 0)
      {
         if (m_packet.stream_index == 0)
         {
            m_oldSize = m_packet.size;
            m_oldData = m_packet.data;

            while(m_packet.size > 0)
            {
               if(m_pCodecCtx->codec == NULL)
               {
                  bytesDecodedReturn = -2;
                  break;
               }
               bytesDecoded = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &mIsFrameReady, &m_packet);
               
               if (bytesDecoded >= 0)
               {
                  m_packet.size -= bytesDecoded;
                  m_packet.data += bytesDecoded;
                  bytesDecodedReturn = (bytesDecodedReturn == -1) ? bytesDecoded : bytesDecodedReturn + bytesDecoded;
               }
               else
               {
                  if(bytesDecodedReturn >=0)
                  {
                     mIsFrameReady = true;
                  }
                  break;
               }
            }

            m_packet.size = m_oldSize;
            m_packet.data = m_oldData;             
         }
         av_free_packet(&m_packet);
      }
   }
   LeaveCriticalSection(&m_cs);

   return bytesDecodedReturn;
}



int DecoderFFMPEG::getImage( BYTE* outImgBuf, int putImgBufLen )
{
   EnterCriticalSection(&m_cs);
   int rtn = -1;
   if (mIsFrameReady)
   {
      struct SwsContext *pSWSContext;

      int w = m_pCodecCtx->width;
      int h = m_pCodecCtx->height;
      if(putImgBufLen >= w*h*4)
      {

         pSWSContext = sws_getContext(w, h, m_pCodecCtx->pix_fmt, w, h, AV_PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
         
         //avpicture_alloc((AVPicture *)&mFrameRGB, PIX_FMT_RGB32, w, h);

         AVFrame frameRGB;
         avpicture_fill((AVPicture *)&frameRGB, (uint8_t*)outImgBuf,AV_PIX_FMT_BGRA, w, h );

         sws_scale(pSWSContext, m_pFrame->data, m_pFrame->linesize, 0, h, frameRGB.data, frameRGB.linesize);      

         //For RGB image, only data 0 is used, so just copy the memory to it directly.
         //memcpy(outImgBuf, mFrameRGB.data[0], 4 * w * h);

         sws_freeContext(pSWSContext);
         //avpicture_free((AVPicture *)&mFrameRGB);   
         rtn = 0;
      }
   }
   LeaveCriticalSection(&m_cs);
   return rtn;
}

int DecoderFFMPEG::ConvertImgFormat(BYTE* inImgBuf, BYTE* outImgBuf, int srcImgFormat, int destImgFormat, int rawW, int rawH, int destW, int destH)
{
   struct SwsContext *pSWSContext;
   pSWSContext = sws_getContext(rawW, rawH, (AVPixelFormat)srcImgFormat, destW, destH, (AVPixelFormat)destImgFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL);

   AVFrame mFrameSrc;
   AVFrame mFrameDest;
   avpicture_fill((AVPicture *)&mFrameSrc, (uint8_t *)inImgBuf, (AVPixelFormat)srcImgFormat, rawW, rawH);
   avpicture_fill((AVPicture *)&mFrameDest, (uint8_t *)outImgBuf, (AVPixelFormat)destImgFormat, destW, destH);

   sws_scale(pSWSContext, mFrameSrc.data, mFrameSrc.linesize, 0, rawH, mFrameDest.data, mFrameDest.linesize);
   sws_freeContext(pSWSContext);   

   return 1;
}

void DecoderFFMPEG::Clear()
{
   EnterCriticalSection(&m_cs);

   if(m_probed)
   {      
      m_currentCodec = 0;

      if(m_pFrame != 0)
      {
         // av_frame_alloc() frames have to be released with av_frame_free()
         av_frame_free(&m_pFrame);
      }

      if(m_pAVIOCtx != 0)
      {
         // libavformat may have replaced the buffer originally passed to
         // avio_alloc_context(), so free the one the context owns now
         // instead of m_buffer.
         av_free(m_pAVIOCtx->buffer);
         av_free(m_pAVIOCtx);
         m_pAVIOCtx = 0;
         m_buffer = 0;
      }

      if(m_pCodecCtx != 0)
      {
         avcodec_close(m_pCodecCtx);
         avcodec_free_context(&m_pCodecCtx);   // allocated with avcodec_alloc_context3()
      }

      avformat_close_input(&m_pAVFormatCtx);   

      m_probed = false;
   }

   LeaveCriticalSection(&m_cs);
}


int DecoderFFMPEG::getVideoRawData( BYTE* outRawBuf, int outRawBufLen )
{
   memcpy(outRawBuf, m_inData, outRawBufLen);
   return 0;
}
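
Also, if I get DXVA2 working, I guess getImage() above cannot run sws_scale() directly on the decoded frame any more, because the frame would come back as AV_PIX_FMT_DXVA2_VLD (a GPU surface). My understanding is that it first has to be copied to system memory with av_hwframe_transfer_data(), something like this untested sketch (DownloadHwFrame is my own name):

Code:


extern "C" {
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
}

// Untested sketch: if the decoder returned a DXVA2 surface, copy it into a
// system-memory frame (normally NV12) that sws_scale() can read.
static AVFrame* DownloadHwFrame(AVFrame* decodedFrame)
{
   if (decodedFrame->format != AV_PIX_FMT_DXVA2_VLD)
      return decodedFrame;                // already a software frame

   AVFrame* swFrame = av_frame_alloc();
   if (av_hwframe_transfer_data(swFrame, decodedFrame, 0) < 0)
   {
      av_frame_free(&swFrame);
      return NULL;                        // transfer failed
   }
   return swFrame;                        // caller frees this with av_frame_free()
}

// getImage() would then use swFrame->format / swFrame->data / swFrame->linesize
// as the source for sws_getContext() and sws_scale().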
