Skip to content

Instantly share code, notes, and snippets.

@CypherpunkSamurai
Last active May 15, 2025 13:05
Show Gist options
  • Save CypherpunkSamurai/affe41b415f1a283de8bff8e496476ba to your computer and use it in GitHub Desktop.
Save CypherpunkSamurai/affe41b415f1a283de8bff8e496476ba to your computer and use it in GitHub Desktop.
FFmpeg Desktop Duplication API Encoder

FFmpeg Desktop Duplication API with LibAV in C

This is an example I created to use the Desktop Duplication API with FFmpeg's libav libraries in C. It is a port of the desktop-capture FFmpeg commands shown below.

# using hardware encoding
ffmpeg -init_hw_device d3d11va -filter_complex ddagrab=0 -c:v h264_nvenc -cq:v 20 output.mkv
# using cpu encoding
ffmpeg -filter_complex ddagrab=0,hwdownload,format=bgra -c:v libx264 -crf 20 output.mkv

Building or Installing Prebuilt FFmpeg Libraries and Headers

You will need MSYS2; install it first. Then run msys2_shell.cmd with the -mingw64 flag, and either use these ffmpeg build scripts (build scripts code) to build the latest FFmpeg, or install the mingw-w64-x86_64-ffmpeg package in the mingw64 environment to get prebuilt FFmpeg binaries, libraries, and headers.

We will need the pkg-config tool to be able to find the required libraries, i.e. the libavcodec.pc file. We need to make sure pkg-config looks in the right folders to find it.

Set the PKG_CONFIG_PATH environment variable to point at the mingw64\lib\pkgconfig folder. For me, MSYS2 is installed under C:\Tools, so I pointed it at the mingw64\lib\pkgconfig folder inside that installation.

Note: If you built ffmpeg yourself it should be in the --prefix=[whatever_folder] prefix folder you provided. It's usually under lib/pkgconfig.

Building this Code

You'll need CMake — install it from MSYS2. (Just look up the MSYS2 mingw64 CMake package and copy-paste the pacman command into the mingw64 terminal; make sure it is the mingw64 terminal, not mingw32 or ucrt, etc.)

Then run cmake build

# cd codebase
mkdir build
cd build

# ---- One Way ----
# cmake -G "MinGW Makefiles" ..
# cmake --build .
# or
# mingw32-make

# ---- Or Other Way ----
# Using Ninja (requires ninja)
cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_MAKE_PROGRAM="ninja" -G Ninja -S ".." -B "."

Resources

cmake_minimum_required(VERSION 3.10)
project(ffmpeg_desktop_try1 C)
set(CMAKE_C_STANDARD 11) # Or gnu17 if you prefer

# Locate the FFmpeg libraries through pkg-config.
# PKG_CONFIG_PATH must point at the mingw64 pkgconfig directory (see notes above).
find_package(PkgConfig REQUIRED)
pkg_check_modules(AVCODEC REQUIRED libavcodec)
pkg_check_modules(AVFORMAT REQUIRED libavformat)
pkg_check_modules(AVUTIL REQUIRED libavutil)
pkg_check_modules(SWSCALE REQUIRED libswscale)
# libavdevice provides avdevice_register_all() (needed for lavfi/ddagrab)
pkg_check_modules(AVDEVICE REQUIRED libavdevice)
# libavfilter is needed for the filter graph driving ddagrab
pkg_check_modules(AVFILTER REQUIRED libavfilter)

add_executable(ffmpeg_desktop_try1 main.c)

# Target-scoped includes instead of directory-wide include_directories():
# keeps the include paths attached to this target only.
target_include_directories(ffmpeg_desktop_try1 PRIVATE
    ${AVCODEC_INCLUDE_DIRS}
    ${AVFORMAT_INCLUDE_DIRS}
    ${AVUTIL_INCLUDE_DIRS}
    ${SWSCALE_INCLUDE_DIRS}
    ${AVDEVICE_INCLUDE_DIRS}
    ${AVFILTER_INCLUDE_DIRS}
)

target_link_libraries(ffmpeg_desktop_try1 PRIVATE
    ${AVCODEC_LIBRARIES}
    ${AVFORMAT_LIBRARIES}
    ${AVUTIL_LIBRARIES}
    ${SWSCALE_LIBRARIES}
    ${AVDEVICE_LIBRARIES}
    ${AVFILTER_LIBRARIES}
)

# d3d11/dxgi are Windows system libraries required by ddagrab and D3D11VA.
# Link them only on Windows so configuration still succeeds elsewhere.
if(WIN32)
    target_link_libraries(ffmpeg_desktop_try1 PRIVATE d3d11 dxgi)
endif()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h> // For bool
#ifdef _WIN32
#include <d3d11.h> // Required for ddagrab and D3D11VA
#endif
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_d3d11va.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/imgutils.h>
#include <libavdevice/avdevice.h> // For avdevice_register_all()
#define OUTPUT_FILENAME "output.mkv"
#define CAPTURE_FRAMERATE 30 // Keep it reasonable for testing
#define CAPTURE_DURATION_SEC 5 // Shorten for quicker tests
// Global for simplicity in this example
static AVFormatContext *ifmt_ctx = NULL;          // lavfi input (ddagrab filter graph)
static AVCodecContext *dec_ctx = NULL; // Decoder for lavfi output
static AVCodecContext *enc_ctx = NULL; // Encoder
static AVFormatContext *ofmt_ctx = NULL;          // output muxer for OUTPUT_FILENAME
static AVStream *out_stream = NULL;               // single video stream in ofmt_ctx
static AVBufferRef *hw_device_ctx = NULL;         // D3D11VA device (GPU path only)
static int video_stream_index = -1;               // index of the video stream in ifmt_ctx
static int64_t encoded_frame_count = 0;           // frames sent to the encoder so far
static int64_t pts_offset = 0;                    // correction applied on non-monotonic input PTS
static int64_t last_input_pts = AV_NOPTS_VALUE;   // last (corrected) input PTS seen
static int final_ret = 0; // To store the definitive return code
/* Emit the hardware-transfer notice exactly once, however often this runs. */
static void print_hw_transfer_message_once() {
    static bool already_announced = false;
    if (already_announced)
        return;
    printf("INFO: Performing HW frame transfer (decoder output context to encoder input context).\n");
    already_announced = true;
}
/*
 * Encode one frame and write every packet the encoder produces to the muxer.
 *
 * frame_to_encode: frame in the encoder's pixel format, or NULL to enter
 *                  flush (drain) mode.
 * stream_index_ignored: unused; kept so existing call sites stay valid.
 *
 * Returns 0 on success (EAGAIN from avcodec_receive_packet is treated as
 * success — the encoder simply wants more input), AVERROR_EOF once flushing
 * completes, or a negative AVERROR on failure.
 * Uses globals: enc_ctx, ofmt_ctx, out_stream, encoded_frame_count.
 */
static int encode_write_frame(AVFrame *frame_to_encode, unsigned int stream_index_ignored) {
    int ret;
    (void)stream_index_ignored; /* parameter retained for signature compatibility */
    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        return AVERROR(ENOMEM);
    }
    if (frame_to_encode) {
        if (frame_to_encode->pts == AV_NOPTS_VALUE) {
            // Safeguard: libx264 requires a valid PTS. encoded_frame_count is
            // already expressed in the encoder time base (1/CAPTURE_FRAMERATE),
            // so assign it directly. (The previous av_rescale_q() call rescaled
            // between identical time bases — a no-op that obscured the intent.)
            frame_to_encode->pts = encoded_frame_count;
            printf("Generated PTS %"PRId64" for frame %"PRId64" (encoder timebase %d/%d)\n",
                   frame_to_encode->pts, encoded_frame_count, enc_ctx->time_base.num, enc_ctx->time_base.den);
        }
    }
    ret = avcodec_send_frame(enc_ctx, frame_to_encode);
    if (ret < 0) {
        if (frame_to_encode) { // Don't log error for NULL frame during flushing if it's just EAGAIN/EOF
            fprintf(stderr, "Error sending a frame for encoding: %s\n", av_err2str(ret));
        } else if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)) {
            fprintf(stderr, "Error sending a NULL frame for flushing: %s\n", av_err2str(ret));
        }
        av_packet_free(&pkt);
        return ret;
    }
    // Drain every packet the encoder has ready; EAGAIN/EOF end the loop.
    while (true) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // AVERROR_EOF from receive_packet is normal when flushing (sent NULL frame)
            // or if the encoder is done.
            // For EAGAIN, it means encoder needs more input or is buffering.
            break;
        } else if (ret < 0) {
            fprintf(stderr, "Error during encoding (receiving packet): %s\n", av_err2str(ret));
            break; // Propagate error
        }
        pkt->stream_index = out_stream->index;
        // Convert timestamps from the encoder time base to the muxer's.
        av_packet_rescale_ts(pkt, enc_ctx->time_base, out_stream->time_base);
        int write_ret = av_interleaved_write_frame(ofmt_ctx, pkt);
        av_packet_unref(pkt); // Unref packet after use, before checking write_ret
        if (write_ret < 0) {
            fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(write_ret));
            ret = write_ret; // Propagate this error
            break;
        }
    }
    av_packet_free(&pkt);
    // EAGAIN is not fatal for this call: the encoder just buffered the frame.
    return (ret == AVERROR(EAGAIN)) ? 0 : ret;
}
void cleanup() {
int flush_ret = 0;
if (enc_ctx) {
printf("Flushing encoder in cleanup...\n");
flush_ret = encode_write_frame(NULL, 0);
if (flush_ret < 0 && flush_ret != AVERROR_EOF) { // AVERROR_EOF is expected from flushing
fprintf(stderr, "Error during final encoder flush: %s\n", av_err2str(flush_ret));
if (final_ret == 0) final_ret = flush_ret; // Capture error if no other error occurred
}
avcodec_free_context(&enc_ctx);
enc_ctx = NULL;
}
if (dec_ctx) {
avcodec_free_context(&dec_ctx);
dec_ctx = NULL;
}
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE) && ofmt_ctx->pb) {
avio_closep(&ofmt_ctx->pb); // This can also return errors
}
if (ofmt_ctx) {
avformat_free_context(ofmt_ctx);
ofmt_ctx = NULL;
}
if (ifmt_ctx) {
avformat_close_input(&ifmt_ctx);
ifmt_ctx = NULL;
}
if (hw_device_ctx) {
av_buffer_unref(&hw_device_ctx);
hw_device_ctx = NULL;
}
printf("Cleaned up resources.\n");
}
/*
 * Entry point. Captures the desktop through the lavfi "ddagrab" filter,
 * decodes the rawvideo stream it emits, and encodes to OUTPUT_FILENAME using
 * libx264 (default, CPU) or h264_nvenc via D3D11VA (pass "gpu" as argv[1]).
 * Overall status accumulates in the global final_ret; cleanup() flushes the
 * encoder and frees all global contexts at the end.
 */
int main(int argc, char **argv) {
// int ret = 0; // Local ret for operations
AVFrame *frame = NULL;
AVFrame *transfer_frame = NULL; // staging frame for GPU->GPU transfers (GPU path only)
AVPacket *pkt_in = NULL;
enum AVHWDeviceType hw_type = AV_HWDEVICE_TYPE_NONE;
const char *encoder_name = NULL;
char filter_descr[512];
int use_gpu = 0;
final_ret = 0; // Initialize global final return code
// --- Argument parsing: "gpu" selects the NVENC/D3D11 path ---
if (argc > 1 && strcmp(argv[1], "gpu") == 0) {
use_gpu = 1;
printf("Using GPU (h264_nvenc) path.\n");
} else {
printf("Using CPU (libx264) path. Use 'gpu' argument for NVENC.\n");
}
av_log_set_level(AV_LOG_DEBUG);
avdevice_register_all();
// --- HW device + filter-graph string selection ---
if (use_gpu) {
encoder_name = "h264_nvenc";
hw_type = AV_HWDEVICE_TYPE_D3D11VA;
if ((final_ret = av_hwdevice_ctx_create(&hw_device_ctx, hw_type, NULL, NULL, 0)) < 0) {
fprintf(stderr, "Failed to create D3D11VA HW device: %s\n", av_err2str(final_ret));
goto end;
}
printf("D3D11VA hardware device context created.\n");
// Keep frames on the GPU (output_format=d3d11) for zero-copy encoding.
snprintf(filter_descr, sizeof(filter_descr), "ddagrab=0:framerate=%d:output_format=d3d11", CAPTURE_FRAMERATE);
} else {
encoder_name = "libx264";
// CPU path: download frames from the GPU and convert to yuv420p for x264.
snprintf(filter_descr, sizeof(filter_descr), "ddagrab=0:framerate=%d,hwdownload,format=bgra,format=yuv420p", CAPTURE_FRAMERATE);
}
// --- Open the filter graph as a lavfi "input device" ---
const AVInputFormat *lavfi_ifmt = av_find_input_format("lavfi");
if (!lavfi_ifmt) {
fprintf(stderr, "Failed to find lavfi input format. FFmpeg build issue?\n");
final_ret = AVERROR_MUXER_NOT_FOUND;
goto end;
}
printf("Opening input with filter: %s\n", filter_descr);
AVDictionary *input_opts = NULL;
if ((final_ret = avformat_open_input(&ifmt_ctx, filter_descr, lavfi_ifmt, &input_opts)) < 0) {
fprintf(stderr, "Cannot open input with lavfi: '%s': %s\n", filter_descr, av_err2str(final_ret));
av_dict_free(&input_opts);
goto end;
}
av_dict_free(&input_opts);
if ((final_ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
fprintf(stderr, "Cannot find stream information: %s\n", av_err2str(final_ret));
goto end;
}
// --- Find the (single) video stream produced by the graph ---
video_stream_index = -1;
for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
break;
}
}
if (video_stream_index < 0) {
fprintf(stderr, "No video stream found in lavfi input\n"); final_ret = AVERROR_STREAM_NOT_FOUND; goto end;
}
AVCodecParameters *in_codecpar = ifmt_ctx->streams[video_stream_index]->codecpar;
printf("Input video from lavfi: %dx%d, format: %s (%d), codec_id: %s (%d), time_base: %d/%d\n",
in_codecpar->width, in_codecpar->height,
av_get_pix_fmt_name(in_codecpar->format), in_codecpar->format,
avcodec_get_name(in_codecpar->codec_id), in_codecpar->codec_id,
ifmt_ctx->streams[video_stream_index]->time_base.num, ifmt_ctx->streams[video_stream_index]->time_base.den);
// --- Decoder setup (lavfi emits rawvideo packets that still need "decoding") ---
const AVCodec *decoder = avcodec_find_decoder(in_codecpar->codec_id);
if (!decoder) {
fprintf(stderr, "Failed to find decoder for %s (expected rawvideo)\n", avcodec_get_name(in_codecpar->codec_id));
final_ret = AVERROR_DECODER_NOT_FOUND; goto end;
}
dec_ctx = avcodec_alloc_context3(decoder);
if (!dec_ctx) { final_ret = AVERROR(ENOMEM); goto end; }
if((final_ret = avcodec_parameters_to_context(dec_ctx, in_codecpar)) < 0) {
fprintf(stderr, "Failed to copy codec parameters to decoder context: %s\n", av_err2str(final_ret));
goto end;
}
dec_ctx->framerate = av_make_q(CAPTURE_FRAMERATE, 1);
dec_ctx->time_base = ifmt_ctx->streams[video_stream_index]->time_base;
// GPU path: hand the D3D11VA device to the decoder so D3D11 frames pass through.
if (use_gpu && in_codecpar->format == AV_PIX_FMT_D3D11) {
dec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
if (!dec_ctx->hw_device_ctx) { final_ret = AVERROR(ENOMEM); goto end; }
printf("Rawvideo decoder configured with D3D11VA device context for D3D11 input.\n");
}
if ((final_ret = avcodec_open2(dec_ctx, decoder, NULL)) < 0) {
fprintf(stderr, "Failed to open lavfi output decoder: %s\n", av_err2str(final_ret)); goto end;
}
printf("Decoder for lavfi output (%s) opened.\n", decoder->name);
// --- Output muxer + encoder setup ---
if((final_ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, OUTPUT_FILENAME)) < 0) {
fprintf(stderr, "Could not create output context: %s\n", av_err2str(final_ret)); goto end;
}
const AVCodec *encoder = avcodec_find_encoder_by_name(encoder_name);
if (!encoder) { fprintf(stderr, "Encoder %s not found\n", encoder_name); final_ret = AVERROR_INVALIDDATA; goto end; }
enc_ctx = avcodec_alloc_context3(encoder);
if (!enc_ctx) { fprintf(stderr, "Failed to allocate encoder context\n"); final_ret = AVERROR(ENOMEM); goto end; }
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio.num ? dec_ctx->sample_aspect_ratio : (AVRational){1,1};
enc_ctx->time_base = av_make_q(1, CAPTURE_FRAMERATE);
enc_ctx->framerate = av_make_q(CAPTURE_FRAMERATE, 1);
if (use_gpu) {
// NVENC consumes D3D11 frames directly; build an explicit HW frames pool
// (NV12 under the hood) sized for the encoder's look-ahead/B-frame needs.
enc_ctx->pix_fmt = AV_PIX_FMT_D3D11;
enc_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
if (!enc_ctx->hw_device_ctx) { final_ret = AVERROR(ENOMEM); goto end; }
AVBufferRef *enc_hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
if (!enc_hw_frames_ref) { final_ret = AVERROR(ENOMEM); goto end; }
AVHWFramesContext *enc_frames_ctx_data = (AVHWFramesContext*)(enc_hw_frames_ref->data);
enc_frames_ctx_data->format = AV_PIX_FMT_D3D11;
enc_frames_ctx_data->sw_format = AV_PIX_FMT_NV12;
enc_frames_ctx_data->width = enc_ctx->width;
enc_frames_ctx_data->height = enc_ctx->height;
enc_frames_ctx_data->initial_pool_size = 20;
if ((final_ret = av_hwframe_ctx_init(enc_hw_frames_ref)) < 0) {
fprintf(stderr, "Failed to initialize encoder HW frames context: %s\n", av_err2str(final_ret));
av_buffer_unref(&enc_hw_frames_ref); goto end;
}
enc_ctx->hw_frames_ctx = enc_hw_frames_ref;
av_opt_set_int(enc_ctx->priv_data, "cq", 20, 0); // constant-quality mode, mirrors -cq:v 20
} else {
enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
av_opt_set(enc_ctx->priv_data, "crf", "20", 0);
av_opt_set(enc_ctx->priv_data, "preset", "ultrafast", 0);
av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0);
}
enc_ctx->gop_size = CAPTURE_FRAMERATE * 2; // keyframe every ~2 seconds
enc_ctx->max_b_frames = use_gpu ? 2 : 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
if ((final_ret = avcodec_open2(enc_ctx, encoder, NULL)) < 0) {
fprintf(stderr, "Cannot open video encoder '%s': %s\n", encoder->name, av_err2str(final_ret)); goto end;
}
printf("Encoder %s opened.\n", encoder_name);
out_stream = avformat_new_stream(ofmt_ctx, encoder);
if (!out_stream) { fprintf(stderr, "Failed allocating output stream\n"); final_ret = AVERROR_UNKNOWN; goto end; }
out_stream->time_base = enc_ctx->time_base;
if((final_ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx)) < 0) {
fprintf(stderr, "Failed to copy codec parameters to output stream: %s\n", av_err2str(final_ret));
goto end;
}
// --- Open the output file and write the container header ---
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
if ((final_ret = avio_open(&ofmt_ctx->pb, OUTPUT_FILENAME, AVIO_FLAG_WRITE)) < 0) {
fprintf(stderr, "Could not open output '%s': %s\n", OUTPUT_FILENAME, av_err2str(final_ret)); goto end;
}
}
if ((final_ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
fprintf(stderr, "Error writing header: %s\n", av_err2str(final_ret));
goto end;
}
printf("Output file header written to %s.\n", OUTPUT_FILENAME);
frame = av_frame_alloc();
if (use_gpu) transfer_frame = av_frame_alloc();
pkt_in = av_packet_alloc();
if (!frame || (use_gpu && !transfer_frame) || !pkt_in) {
fprintf(stderr, "Could not allocate frame/packet\n"); final_ret = AVERROR(ENOMEM); goto end;
}
printf("Starting capture (max ~%d s)...\n", CAPTURE_DURATION_SEC);
// --- Main capture loop: read packet -> decode -> (HW transfer) -> encode ---
bool active_processing = true;
int read_frame_ret = 0;
while (active_processing) {
read_frame_ret = av_read_frame(ifmt_ctx, pkt_in);
if (read_frame_ret < 0) {
if (read_frame_ret == AVERROR_EOF) {
printf("av_read_frame: End of file from lavfi normally.\n");
} else {
fprintf(stderr, "av_read_frame: Error reading packet from lavfi: %s\n", av_err2str(read_frame_ret));
}
final_ret = read_frame_ret; // Store this error to determine overall success
active_processing = false;
// No break here, let the flushing logic run for decoder based on this EOF/error
}
if (active_processing && pkt_in->stream_index == video_stream_index) {
printf("Read packet from stream %d, PTS %"PRId64", DTS %"PRId64"\n", pkt_in->stream_index, pkt_in->pts, pkt_in->dts);
// Enforce strictly increasing PTS: ddagrab timestamps can stall or repeat,
// so accumulate an offset whenever a non-monotonic PTS is seen.
if (pkt_in->pts != AV_NOPTS_VALUE) {
if (last_input_pts != AV_NOPTS_VALUE && pkt_in->pts <= last_input_pts) {
printf("Warning: Non-monotonic input PTS detected (%"PRId64" -> %"PRId64"). Adjusting offset.\n", last_input_pts, pkt_in->pts);
pts_offset += (last_input_pts - pkt_in->pts + 1);
}
pkt_in->pts += pts_offset;
last_input_pts = pkt_in->pts;
} else {
printf("Warning: Input packet has no PTS.\n");
}
if (pkt_in->dts == AV_NOPTS_VALUE && pkt_in->pts != AV_NOPTS_VALUE) pkt_in->dts = pkt_in->pts;
int send_ret = avcodec_send_packet(dec_ctx, pkt_in);
if (send_ret < 0) {
fprintf(stderr, "Error sending packet to lavfi output decoder: %s\n", av_err2str(send_ret));
final_ret = send_ret;
active_processing = false;
}
// Inner loop to receive all decoded frames from the sent packet
while (active_processing && send_ret >=0) {
int receive_ret = avcodec_receive_frame(dec_ctx, frame);
if (receive_ret == AVERROR(EAGAIN)) { // Decoder needs more packets
break;
} else if (receive_ret == AVERROR_EOF) { // Decoder fully flushed for this input
printf("Decoder EOF reached while receiving frames.\n");
active_processing = false; // No more frames will come from current input stream state
break;
} else if (receive_ret < 0) {
fprintf(stderr, "Error receiving frame from lavfi output decoder: %s\n", av_err2str(receive_ret));
final_ret = receive_ret;
active_processing = false;
break;
}
printf("Received frame from decoder, format %s, PTS %"PRId64"\n", av_get_pix_fmt_name(frame->format), frame->pts);
// Rescale decoder-timebase PTS into the encoder timebase.
if (frame->pts != AV_NOPTS_VALUE) {
frame->pts = av_rescale_q(frame->pts, dec_ctx->time_base, enc_ctx->time_base);
} else {
// Generate PTS if missing, though ideally rawvideo decoder preserves it
frame->pts = av_rescale_q(encoded_frame_count, (AVRational){1, CAPTURE_FRAMERATE}, enc_ctx->time_base);
printf("Generated fallback PTS %"PRId64" for encoder.\n", frame->pts);
}
AVFrame *frame_to_encode = frame;
if (use_gpu) { // GPU Path HW transfer logic
// Decoded D3D11 frames may live in ddagrab's frames pool, not the
// encoder's; in that case copy into a frame from the encoder's pool.
if (frame->format != AV_PIX_FMT_D3D11) {
fprintf(stderr, "Decoded frame is %s, not AV_PIX_FMT_D3D11 for GPU path!\n", av_get_pix_fmt_name(frame->format));
final_ret = AVERROR_INVALIDDATA; active_processing = false; break;
}
if (!frame->hw_frames_ctx) {
fprintf(stderr, "Decoded D3D11 frame is missing its own hw_frames_ctx!\n");
final_ret = AVERROR_INVALIDDATA; active_processing = false; break;
}
if (!enc_ctx->hw_frames_ctx) {
fprintf(stderr, "Encoder hw_frames_ctx is NULL.\n");
final_ret = AVERROR_INVALIDDATA; active_processing = false; break;
}
if (frame->hw_frames_ctx->data != enc_ctx->hw_frames_ctx->data) {
print_hw_transfer_message_once();
av_frame_unref(transfer_frame);
if (av_hwframe_get_buffer(enc_ctx->hw_frames_ctx, transfer_frame, 0) < 0) {
fprintf(stderr, "Failed to get buffer from encoder's hw_frames_ctx for transfer\n");
final_ret = AVERROR_UNKNOWN; active_processing = false; break;
}
if (av_hwframe_transfer_data(transfer_frame, frame, 0) < 0) {
fprintf(stderr, "Failed to transfer HW frame data\n");
av_frame_unref(transfer_frame);
final_ret = AVERROR_UNKNOWN; active_processing = false; break;
}
transfer_frame->pts = frame->pts;
transfer_frame->sample_aspect_ratio = frame->sample_aspect_ratio;
frame_to_encode = transfer_frame;
}
}
frame_to_encode->pict_type = AV_PICTURE_TYPE_NONE; // let the encoder pick the picture type
int encode_ret = encode_write_frame(frame_to_encode, 0);
if (encode_ret < 0 && encode_ret != AVERROR_EOF) { // EOF from encode_write_frame means encoder flushed
fprintf(stderr, "encode_write_frame failed.\n");
final_ret = encode_ret;
active_processing = false; // Stop all processing on encoding error
}
if (frame_to_encode == transfer_frame) av_frame_unref(transfer_frame);
av_frame_unref(frame); // Unref frame from decoder
if (active_processing) encoded_frame_count++; // Only count if successfully processed
if (encoded_frame_count >= CAPTURE_FRAMERATE * CAPTURE_DURATION_SEC) {
printf("Reached ~%ds capture limit (%"PRId64" frames).\n", CAPTURE_DURATION_SEC, encoded_frame_count);
active_processing = false; // Signal to stop reading more input
final_ret = 0; // Reaching limit is a success for this part
}
} // End inner loop (receive_frame)
} // End if video_stream_index
if (pkt_in->data) av_packet_unref(pkt_in); // Unref packet from av_read_frame
if (read_frame_ret == AVERROR_EOF && active_processing) {
// If av_read_frame returned EOF but we are still active_processing (e.g. limit not hit)
// then this means true end of input.
active_processing = false;
}
} // End outer loop (read_frame)
printf("Main processing loop finished. Encoded %"PRId64" frames. Final_ret before flush: %d\n", encoded_frame_count, final_ret);
// --- FLUSH DECODER ---
// Only if av_read_frame didn't already signal EOF to it via NULL packet.
// Actually, always send NULL to ensure it's flushed.
printf("Flushing lavfi output decoder (sending NULL packet)...\n");
int send_ret = avcodec_send_packet(dec_ctx, NULL); // Send NULL packet to flush
if (send_ret < 0 && send_ret != AVERROR_EOF) { // EOF means already flushed
fprintf(stderr, "Error sending NULL packet to flush decoder: %s\n", av_err2str(send_ret));
if (final_ret == 0 || final_ret == AVERROR_EOF) final_ret = send_ret;
}
bool flushing_decoder = true;
while(flushing_decoder) {
int receive_ret = avcodec_receive_frame(dec_ctx, frame);
if (receive_ret == AVERROR_EOF || receive_ret == AVERROR(EAGAIN)) {
if(receive_ret == AVERROR_EOF) printf("Decoder fully flushed.\n");
break;
}
if (receive_ret < 0) {
fprintf(stderr, "Error flushing lavfi output decoder (receiving frame): %s\n", av_err2str(receive_ret));
if (final_ret == 0 || final_ret == AVERROR_EOF) final_ret = receive_ret;
break;
}
printf("Received flushed frame from decoder, PTS %"PRId64"\n", frame->pts);
if (frame->pts != AV_NOPTS_VALUE) {
frame->pts = av_rescale_q(frame->pts, dec_ctx->time_base, enc_ctx->time_base);
} else {
frame->pts = av_rescale_q(encoded_frame_count, (AVRational){1, CAPTURE_FRAMERATE}, enc_ctx->time_base);
}
AVFrame* frame_to_encode = frame;
if (use_gpu) { // GPU Path HW transfer logic for flushed frames
// Same pool-mismatch transfer as the main loop, but failures here only
// skip the individual flushed frame instead of aborting.
if (frame->format == AV_PIX_FMT_D3D11 && frame->hw_frames_ctx && enc_ctx->hw_frames_ctx &&
frame->hw_frames_ctx->data != enc_ctx->hw_frames_ctx->data) {
print_hw_transfer_message_once();
av_frame_unref(transfer_frame);
if (av_hwframe_get_buffer(enc_ctx->hw_frames_ctx, transfer_frame, 0) == 0 &&
av_hwframe_transfer_data(transfer_frame, frame, 0) == 0) {
transfer_frame->pts = frame->pts;
transfer_frame->sample_aspect_ratio = frame->sample_aspect_ratio;
frame_to_encode = transfer_frame;
} else {
fprintf(stderr, "HW transfer for flushed frame failed. Skipping this frame.\n");
av_frame_unref(frame);
if (frame_to_encode == transfer_frame) av_frame_unref(transfer_frame);
continue;
}
} else if (frame->format != AV_PIX_FMT_D3D11 && enc_ctx->pix_fmt == AV_PIX_FMT_D3D11) {
fprintf(stderr, "Flushed frame is %s, but encoder expects D3D11. Skipping.\n", av_get_pix_fmt_name(frame->format));
av_frame_unref(frame);
continue;
}
}
frame_to_encode->pict_type = AV_PICTURE_TYPE_NONE;
int encode_ret = encode_write_frame(frame_to_encode, 0);
if (encode_ret < 0 && encode_ret != AVERROR_EOF) {
fprintf(stderr, "encode_write_frame failed for flushed decoder frame.\n");
if (final_ret == 0 || final_ret == AVERROR_EOF) final_ret = encode_ret;
}
if (frame_to_encode == transfer_frame) av_frame_unref(transfer_frame);
av_frame_unref(frame);
if (encode_ret >= 0) encoded_frame_count++; // Count if successfully encoded
}
printf("Lavfi output decoder flushing complete. Total encoded frames: %"PRId64"\n", encoded_frame_count);
// Encoder flushing is handled by cleanup() which calls encode_write_frame(NULL)
printf("Encoder will be flushed during cleanup. Final_ret before trailer: %d\n", final_ret);
// --- Finalize the container ---
if (ofmt_ctx && ofmt_ctx->pb) { // Ensure ofmt_ctx and pb are valid
int trailer_ret = av_write_trailer(ofmt_ctx);
if (trailer_ret < 0) {
fprintf(stderr, "Error writing output trailer: %s\n", av_err2str(trailer_ret));
if (final_ret == 0 || final_ret == AVERROR_EOF) final_ret = trailer_ret;
} else {
printf("Output file trailer written.\n");
}
} else {
printf("Output format context or pb was null, skipping trailer.\n");
if (final_ret == 0) final_ret = AVERROR_UNKNOWN; // Indicate an issue if we expected to write trailer
}
printf("Encoding complete. Final_ret before cleanup: %d\n", final_ret);
end:
// Local allocations first, then the global contexts via cleanup().
if (frame) av_frame_free(&frame);
if (transfer_frame) av_frame_free(&transfer_frame);
if (pkt_in) av_packet_free(&pkt_in);
cleanup(); // cleanup might modify final_ret if errors occur there
printf("Exiting with code: %d (%s)\n", final_ret, final_ret < 0 ? av_err2str(final_ret) : "Success or EOF");
return (final_ret < 0 && final_ret != AVERROR_EOF) ? 1 : 0; // Treat EOF from input as success overall if no other error
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment