Cannot Encode

For developers who use FFmpeg in their software.
loupusxx
Posts: 3
Joined: Wed Dec 27, 2017 7:07 am

Cannot Encode

Post by loupusxx » Wed Dec 27, 2017 7:13 am

Hi, I am doing something wrong but couldn't find the problem. When I run the code below, during the video encoding part it keeps printing "too many b-frames in a row" for a while, and then I get an access violation error. Can you please guide me?

Code:

#include "main.h"


SDL_Texture *texture = NULL;
SDL_Renderer *renderer = NULL;
SDL_Rect r;
SwsContext *sws_ctx = NULL;
SwrContext *swr_ctx = NULL;


typedef struct SettingsContext
{
	AVCodecID AudioCodecID;
	AVCodec *AudioCodec;
	AVSampleFormat AudioSampleFormat;
	int64_t AudioBitrate;
	int AudioSampleRate;
	int AudioChannelNumber;
	int AudioStreamIndex;
	

	AVCodecID VideoCodecID;
	AVCodec *VideoCodec;
	int64_t VideoBitrate;
	int VideoWidth;
	int VideoHeight;
	AVRational VideoTimeBase;
	AVPixelFormat VideoPixelFormat;
	int VideoStreamIndex;

	char *Inputfile;
	char *Outputfile;

	AVFormatContext *inFmt;
	AVFormatContext *outFmt;
	AVCodecContext *AudioDecCtx;
	AVCodecContext *AudioEncCtx;
	AVCodecContext *VideoDecCtx;
	AVCodecContext *VideoEncCtx;

} SettingsContext;

int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
	const enum AVSampleFormat *p = codec->sample_fmts;

	while (*p != AV_SAMPLE_FMT_NONE) {
		if (*p == sample_fmt)
			return 1;
		p++;
	}
	return 0;
}

int select_sample_rate(AVCodec *codec)
{
	const int *p;
	int best_samplerate = 0;

	if (!codec->supported_samplerates)
		return 44100;

	p = codec->supported_samplerates;
	while (*p) {
		best_samplerate = FFMAX(*p, best_samplerate);
		p++;
	}
	return best_samplerate;
}

uint64_t select_channel_layout(AVCodec *codec)
{
	const uint64_t *p;
	uint64_t best_ch_layout = 0;
	int best_nb_channels = 0;

	if (!codec->channel_layouts)
		return AV_CH_LAYOUT_STEREO;

	p = codec->channel_layouts;
	while (*p) {
		int nb_channels = av_get_channel_layout_nb_channels(*p);

		if (nb_channels > best_nb_channels) {
			best_ch_layout = *p;
			best_nb_channels = nb_channels;
		}
		p++;
	}
	return best_ch_layout;
}

AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id, SettingsContext *pSettings)
{
	AVCodecContext *c;
	AVStream *st;
	/* find the encoder */
	*codec = avcodec_find_encoder(codec_id);
	if (!(*codec)) {
		fprintf(stderr, "Could not find encoder for '%s'\n",
			avcodec_get_name(codec_id));
		exit(1);
	}
	st = avformat_new_stream(oc, *codec);
	if (!st) {
		fprintf(stderr, "Could not allocate stream\n");
		exit(1);
	}
	st->id = oc->nb_streams - 1;
	c = st->codec;
	switch ((*codec)->type) {
	case AVMEDIA_TYPE_AUDIO:
		c->sample_fmt = pSettings->AudioSampleFormat;
		if (!check_sample_fmt(*codec, c->sample_fmt))
		{
			fprintf(stderr, "Encoder does not support sample format %s", av_get_sample_fmt_name(c->sample_fmt));
			exit(1);
		}
		c->sample_rate = select_sample_rate(*codec);
		c->channel_layout = select_channel_layout(*codec);;
		c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
		c->bit_rate = pSettings->AudioBitrate;
		break;
	case AVMEDIA_TYPE_VIDEO:
		c->codec_id = codec_id;
		c->bit_rate = pSettings->VideoBitrate;
		/* Resolution must be a multiple of two. */
		c->width = pSettings->VideoWidth;
		c->height = pSettings->VideoHeight;

		c->time_base = pSettings->VideoTimeBase;
		c->gop_size = 12; /* emit one intra frame every twelve frames at most */
		c->pix_fmt = pSettings->VideoPixelFormat;
		if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
			/* just for testing, we also add B frames */
			c->max_b_frames = 2;
		}
		if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
			/* Needed to avoid using macroblocks in which some coeffs overflow.
			* This does not happen with normal video, it just happens here as
			* the motion of the chroma plane does not match the luma plane. */
			c->mb_decision = 2;
		}

		break;
	default:
		break;
	}
	/* Some formats want stream headers to be separate. */
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	return st;
}

int open_input_file(SettingsContext *pSettings)
{
	int ret;
	unsigned int i;
	//AVFormatContext *ifmt_ctx = pSettings->inFmt;
	if ((ret = avformat_open_input(&pSettings->inFmt, pSettings->Inputfile, NULL, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
		return ret;
	}
	if ((ret = avformat_find_stream_info(pSettings->inFmt, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
		return ret;
	}
	for (i = 0; i < pSettings->inFmt->nb_streams; i++) {
		AVStream *stream;
		AVCodecContext *codec_ctx;
		stream = pSettings->inFmt->streams[i];
		codec_ctx = stream->codec;
		/* Reencode video & audio and remux subtitles etc. */
		if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&	pSettings->VideoStreamIndex < 0)
		{
			/* Open decoder */
			ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
			pSettings->VideoStreamIndex = i;
			pSettings->VideoDecCtx = codec_ctx;

		}

		if (codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO &&	pSettings->AudioStreamIndex < 0)
		{
			/* Open decoder */
			ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
			pSettings->AudioStreamIndex = i;
			pSettings->AudioDecCtx = codec_ctx;

		}
	}
	av_dump_format(pSettings->inFmt, 0, pSettings->Inputfile, 0);
	return 0;
}

int open_output_file(SettingsContext *pSettings)
{
	int ret;
	AVStream *stream = NULL;
	stream = add_stream(pSettings->outFmt, &pSettings->VideoCodec, pSettings->VideoCodecID, pSettings);

	ret = avcodec_open2(stream->codec, stream->codec->codec, NULL);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream \n");
		return ret;
	}
	pSettings->VideoEncCtx = stream->codec;

	stream = NULL;
	stream = add_stream(pSettings->outFmt, &pSettings->AudioCodec, pSettings->AudioCodecID, pSettings);

	ret = avcodec_open2(stream->codec, stream->codec->codec, NULL);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream \n");
		return ret;
	}

	pSettings->AudioEncCtx = stream->codec;

	av_dump_format(pSettings->outFmt, 0, pSettings->Outputfile, 1);

	if (!(pSettings->outFmt->oformat->flags & AVFMT_NOFILE)) 
	{
		ret = avio_open(&pSettings->outFmt->pb, pSettings->Outputfile, AVIO_FLAG_WRITE);
		if (ret < 0) 
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", pSettings->Outputfile);
			return ret;
		}
	}
	/* init muxer, write output file header */
	ret = avformat_write_header(pSettings->outFmt, NULL);
	if (ret < 0) 
	{
		av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
		return ret;
	}
	return 0;
}

int init_resampler(AVCodecContext *input_codec_context, AVCodecContext *output_codec_context, SwrContext **resample_context)
{
	int error;
	/*
	* Create a resampler context for the conversion.
	* Set the conversion parameters.
	* Default channel layouts based on the number of channels
	* are assumed for simplicity (they are sometimes not detected
	* properly by the demuxer and/or decoder).
	*/
	*resample_context = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(output_codec_context->channels),
		output_codec_context->sample_fmt,
		output_codec_context->sample_rate,
		av_get_default_channel_layout(input_codec_context->channels),
		input_codec_context->sample_fmt,
		input_codec_context->sample_rate,
		0, NULL);
	if (!*resample_context) {
		fprintf(stderr, "Could not allocate resample context\n");
		return AVERROR(ENOMEM);
	}
	/*
	* Perform a sanity check so that the number of converted samples is
	* not greater than the number of samples to be converted.
	* If the sample rates differ, this case has to be handled differently
	*/
	//av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
	/* Open the resampler with the specified parameters. */
	if ((error = swr_init(*resample_context)) < 0) {
		fprintf(stderr, "Could not open resample context\n");
		swr_free(resample_context);
		return error;
	}
	return 0;
}

int init_SDL(AVCodecContext *VideoCtx)
{
	SDL_Window *window;

	if (SDL_Init(SDL_INIT_VIDEO)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	window = SDL_CreateWindow(
		"Ön İzleme",
		SDL_WINDOWPOS_UNDEFINED,
		SDL_WINDOWPOS_UNDEFINED,
		VideoCtx->width,
		VideoCtx->height,
		0
		);
	r.x = 0;
	r.y = 0;
	r.w = VideoCtx->width;
	r.h = VideoCtx->height;


	if (!window) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		return -1;
	}

	renderer = SDL_CreateRenderer(window, -1, 0);
	if (!renderer) {
		fprintf(stderr, "SDL: could not create renderer - exiting\n");
		return -1;
	}
	texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, VideoCtx->width, VideoCtx->height);
	if (!texture){ printf("SDL: could not create texture\n"); return -1; }
	sws_ctx = sws_getContext(VideoCtx->width, VideoCtx->height, VideoCtx->pix_fmt, VideoCtx->width, VideoCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
	if (!sws_ctx) { printf("SDL: could not create rescale context\n"); return -1; }
	return 0;
}

int decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt)
{

	int ret;

	ret = avcodec_send_packet(dec_ctx, pkt);
	if (ret < 0) {
		fprintf(stderr, "Error sending a packet for decoding\n");
		return ret;
	}
	while (ret >= 0) {
		ret = avcodec_receive_frame(dec_ctx, frame);
		return ret;

		/*
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) // AVERROR(EAGAIN) => not enough data left to decode, AVERROR_EOF => the decoder has been fully drained
			return;										  // send a new packet
		else if (ret < 0) {								  // does the decoder get flushed when it reports an error?
			fprintf(stderr, "Error during decoding\n");
			exit(1);
		}
		*/
	}
}

int encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt)
{
	int ret;

	/* send the frame for encoding */
	ret = avcodec_send_frame(enc_ctx, frame);
	if (ret < 0) {
		fprintf(stderr, "Error sending the frame to the encoder\n");
		return ret;
	}
	/* read all the available output packets (in general there may be any
	* number of them) */
	while (ret >= 0) {
		ret = avcodec_receive_packet(enc_ctx, pkt);
		return ret;
		
	}
}

void displayFrame(AVFrame* frame, AVCodecContext *dec_ctx)
{
	sws_scale(sws_ctx, (uint8_t const * const *)frame->data, frame->linesize, 0, dec_ctx->height, frame->data, frame->linesize);
	SDL_UpdateYUVTexture(texture, &r, frame->data[0], frame->linesize[0], frame->data[1], frame->linesize[1], frame->data[2], frame->linesize[2]);
	SDL_RenderClear(renderer);
	SDL_RenderCopy(renderer, texture, NULL, NULL);
	SDL_RenderPresent(renderer);
}

AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples)
{
	int ret;
	AVFrame *frame = av_frame_alloc();	

	if (!frame) {
		fprintf(stderr, "Error allocating an audio frame\n");
		exit(1);
	}

	frame->format = sample_fmt;
	frame->channel_layout = channel_layout;
	frame->sample_rate = sample_rate;
	frame->nb_samples = nb_samples;

	if (nb_samples) {
		ret = av_frame_get_buffer(frame, 0);
		if (ret < 0) {
			fprintf(stderr, "Error allocating an audio buffer\n");
			exit(1);
		}
	}

	return frame;
}

void CommitSettings(SettingsContext *pSettings)
{
	AVRational vfrmRate;
	vfrmRate.num = 1;
	vfrmRate.den = 25;

	pSettings->Inputfile = "D:\\movie\\iboy.mkv";
	pSettings->Outputfile = "D:\\movie\\alomelo.mpg";
	pSettings->AudioCodecID = AV_CODEC_ID_MP2;
	pSettings->AudioBitrate = 64000;
	pSettings->AudioSampleFormat = AV_SAMPLE_FMT_S16;
	pSettings->AudioSampleRate = 48000;
	pSettings->AudioChannelNumber = 2;


	pSettings->VideoCodecID = AV_CODEC_ID_MPEG2VIDEO;
	pSettings->VideoBitrate = 400000;
	pSettings->VideoWidth = 720;
	pSettings->VideoHeight = 576;
	pSettings->VideoPixelFormat = AV_PIX_FMT_YUV420P;
	pSettings->VideoTimeBase = vfrmRate;

	/* stream indices start at -1 so open_input_file() picks the first audio/video streams */
	pSettings->AudioStreamIndex = -1;
	pSettings->VideoStreamIndex = -1;

	pSettings->inFmt = NULL;
	pSettings->outFmt = NULL;
}

const char *get_error_text(const int error)
{
	static char error_buffer[255];
	av_strerror(error, error_buffer, sizeof(error_buffer));
	return error_buffer;
}

int main()
{
	AVFrame *frame = NULL;
	AVPacket InPacket;
	AVPacket OutPacket;	
	AVPacket *pkt;
	
	SwrContext *resample_context = NULL;
	SettingsContext settings;
	int ret;

	CommitSettings(&settings);

	InPacket.data = NULL;
	InPacket.size = 0;

	OutPacket.data = NULL;
	OutPacket.size = 0;

	av_register_all();

	avformat_alloc_output_context2(&(settings.outFmt), NULL, NULL, settings.Outputfile);
	if (!settings.outFmt)
		return -1;


	if ((ret = open_input_file(&settings)) < 0)
		goto end;
	if ((ret = open_output_file(&settings)) < 0)
		goto end;

	ret = init_SDL(settings.VideoDecCtx);
	if (ret == -1) goto end;

	if (init_resampler(settings.AudioDecCtx, settings.AudioEncCtx, &resample_context))
	{
		fprintf(stderr, "SDL: could not init resample context\n");
		goto end;
	}

	frame = av_frame_alloc();

	
	pkt = av_packet_alloc();
	if (!pkt)
		exit(1);

	while (1)
	{
		if ((ret = av_read_frame(settings.inFmt, &InPacket)) < 0)
			goto end;
		if (!frame) frame = av_frame_alloc();
		if (!frame) goto end;
		if (InPacket.stream_index == settings.AudioStreamIndex)
		{
			ret = decode(settings.AudioDecCtx, frame, &InPacket);
		//	if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)		// send a new packet
			if (ret < 0)
			{
				av_frame_free(&frame);
				av_packet_unref(&InPacket);
				continue;
			}
			
			/*
			AVFrame *audioFrame = alloc_audio_frame(settings.AudioEncCtx->sample_fmt, settings.AudioEncCtx->channel_layout, settings.AudioEncCtx->sample_rate, settings.AudioEncCtx->frame_size);		
			if (!audioFrame)
			{
				printf("failed to allocate audioFrame");
				goto end;
			}

	

		int	dst_nb_samples = av_rescale_rnd(swr_get_delay(resample_context, settings.AudioEncCtx->sample_rate) + frame->nb_samples,
				settings.AudioEncCtx->sample_rate, settings.AudioEncCtx->sample_rate, AV_ROUND_UP);
		
		ret = av_frame_make_writable(audioFrame);
		         if (ret < 0)
			           goto end;

		ret = swr_convert(resample_context, (uint8_t**)(audioFrame->data), dst_nb_samples, (const uint8_t**)frame->extended_data, frame->nb_samples);
			if (ret < 0)
			{
				printf("failed to resample audioFrame");
				goto end;
			}

			av_assert0(dst_nb_samples == frame->nb_samples);

			//encoding audio
			ret = encode(settings.AudioEncCtx, audioFrame, &OutPacket);
			if (ret < 0)		// send a new packet
			{
				av_frame_free(&frame);
				av_packet_unref(&InPacket);
				if (OutPacket.side_data_elems>0)
					av_packet_unref(&OutPacket);
				continue;
			}
			free(audioFrame);
			*/
		}
		if (InPacket.stream_index == settings.VideoStreamIndex)
		{
			ret = decode(settings.VideoDecCtx, frame, &InPacket);
	//		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)		// send a new packet
			if (ret < 0)
			{
				av_frame_free(&frame);
				av_packet_unref(&InPacket);
				continue;
			}
			displayFrame(frame, settings.VideoDecCtx);

			//encoding video
			if (!pkt)
				pkt = av_packet_alloc();
			if (!pkt)
				goto end;
			ret = encode(settings.VideoEncCtx, frame, pkt);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)		// send a new packet
			{
				av_frame_free(&frame);
				av_packet_unref(&InPacket);
				av_packet_unref(pkt);
				continue;
			}
			ret = av_write_frame(settings.outFmt, pkt);
			if (ret < 0)
			(stderr, "Could not write frame (error '%s')\n",  get_error_text(ret));

		}
		av_frame_free(&frame);
		av_packet_unref(&InPacket);
		av_packet_unref(pkt);

	}

end:
	av_frame_free(&frame);
	av_packet_unref(&InPacket);
}
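
For comparison, this is roughly how the receive loop looks in the official encode_video.c example that I tried to adapt: it keeps calling avcodec_receive_packet until it gets EAGAIN or EOF, instead of returning after the first call like my encode() above does (simplified here to plain file output):

Code:

static void example_encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt, FILE *outfile)
{
	int ret;

	/* send the frame to the encoder (a NULL frame flushes it) */
	ret = avcodec_send_frame(enc_ctx, frame);
	if (ret < 0) {
		fprintf(stderr, "Error sending a frame for encoding\n");
		exit(1);
	}

	/* drain every packet the encoder has ready for this frame */
	while (ret >= 0) {
		ret = avcodec_receive_packet(enc_ctx, pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return;
		else if (ret < 0) {
			fprintf(stderr, "Error during encoding\n");
			exit(1);
		}

		fwrite(pkt->data, 1, pkt->size, outfile);
		av_packet_unref(pkt);
	}
}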
