package main

// #cgo pkg-config: libavformat libavcodec libavfilter libavutil libavdevice
// #cgo CFLAGS: -Wno-deprecated-declarations
// #include <libavformat/avformat.h>
// #include <libavcodec/avcodec.h>
// #include <libavfilter/avfilter.h>
// #include <libavutil/hwcontext.h>
// #include <libavutil/hwcontext_d3d11va.h> // For D3D11VA types if needed by FFmpeg
// #include <libavutil/opt.h>
// #include <libavutil/pixdesc.h>
// #include <libavutil/imgutils.h>
// #include <libavdevice/avdevice.h>
// #include <stdio.h>  // For C.fprintf or C.sprintf, if ever needed
// #include <stdlib.h> // For C.free
// #include <string.h> // For C.strcmp, if ever needed
// // Helper functions that expose constants, because C macros are not directly accessible from Go.
// static int64_t get_av_nopts_value() { return AV_NOPTS_VALUE; }
// static int get_av_error_eof() { return AVERROR_EOF; }
// static int get_av_error_eagain() { return AVERROR(EAGAIN); }
// static int get_av_error_enomem() { return AVERROR(ENOMEM); }
// static int get_av_error_muxer_not_found() { return AVERROR_MUXER_NOT_FOUND; }
// static int get_av_error_stream_not_found() { return AVERROR_STREAM_NOT_FOUND; }
// static int get_av_error_decoder_not_found() { return AVERROR_DECODER_NOT_FOUND; }
// static int get_av_error_invaliddata() { return AVERROR_INVALIDDATA; }
// static int get_av_error_unknown() { return AVERROR_UNKNOWN; }
import "C"

import (
	"fmt"
	"os"
	"unsafe"
)

const (
	outputFilename     = "output.mkv"
	captureFramerate   = 30
	captureDurationSec = 5
)
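
// Build/run sketch (assumptions, not guarantees): this program assumes a Windows
// FFmpeg build whose pkg-config files are discoverable (e.g. via PKG_CONFIG_PATH)
// and that was configured with ddagrab (Desktop Duplication), D3D11VA and NVENC
// support. Something like:
//
//	go build -o screencap.exe .
//	screencap.exe        // CPU path (libx264)
//	screencap.exe gpu    // GPU path (h264_nvenc via D3D11VA)
//
// The executable name and environment setup above are illustrative only.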

// Global variables corresponding to the C static globals.
var (
	ifmtCtx           *C.AVFormatContext
	decCtx            *C.AVCodecContext
	encCtx            *C.AVCodecContext
	ofmtCtx           *C.AVFormatContext
	outStream         *C.AVStream
	hwDeviceCtx       *C.AVBufferRef
	videoStreamIndex  C.int     = -1
	encodedFrameCount C.int64_t = 0
	ptsOffset         C.int64_t = 0
	lastInputPts      C.int64_t = C.get_av_nopts_value()
	finalRet          C.int     = 0
)

var hwTransferMessagePrinted = false

func printHwTransferMessageOnce() {
	if !hwTransferMessagePrinted {
		fmt.Println("INFO: Performing HW frame transfer (decoder output context to encoder input context).")
		hwTransferMessagePrinted = true
	}
}

func avErr2Str(errnum C.int) string {
	errbuf := make([]C.char, C.AV_ERROR_MAX_STRING_SIZE)
	C.av_strerror(errnum, &errbuf[0], C.size_t(len(errbuf)))
	return C.GoString(&errbuf[0])
}
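
// encodeWriteFrame sends frameToEncode to the encoder (nil flushes it) and drains
// every packet the encoder produces into the output muxer, rescaling timestamps from
// the encoder time base to the output stream time base. The streamIndexIgnored
// parameter is unused; the output stream index is always taken from outStream.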
func encodeWriteFrame(frameToEncode *C.AVFrame, streamIndexIgnored C.uint) C.int {
	var ret C.int
	pkt := C.av_packet_alloc()
	if pkt == nil {
		fmt.Fprintln(os.Stderr, "Could not allocate AVPacket")
		return C.get_av_error_enomem()
	}
	defer C.av_packet_free(&pkt) // Ensure the packet is freed.

	if frameToEncode != nil {
		if frameToEncode.pts == C.get_av_nopts_value() {
			// This should not be necessary if PTS propagation is correct, but it acts
			// as a safeguard for libx264, which requires a valid PTS.
			frameToEncode.pts = C.av_rescale_q(encodedFrameCount, encCtx.time_base, encCtx.time_base)
			fmt.Printf("Generated PTS %d for frame %d (encoder timebase %d/%d)\n",
				frameToEncode.pts, encodedFrameCount, encCtx.time_base.num, encCtx.time_base.den)
		}
	}

	ret = C.avcodec_send_frame(encCtx, frameToEncode)
	if ret < 0 {
		if frameToEncode != nil {
			fmt.Fprintf(os.Stderr, "Error sending a frame for encoding: %s\n", avErr2Str(ret))
		} else if ret != C.get_av_error_eof() && ret != C.get_av_error_eagain() {
			// For a NULL (flush) frame, EAGAIN/EOF are expected and not logged.
			fmt.Fprintf(os.Stderr, "Error sending a NULL frame for flushing: %s\n", avErr2Str(ret))
		}
		return ret
	}

	for {
		// pkt is unreffed at the top of the loop so it can be reused by avcodec_receive_packet.
		C.av_packet_unref(pkt)

		ret = C.avcodec_receive_packet(encCtx, pkt)
		if ret == C.get_av_error_eagain() || ret == C.get_av_error_eof() {
			break
		} else if ret < 0 {
			fmt.Fprintf(os.Stderr, "Error during encoding (receiving packet): %s\n", avErr2Str(ret))
			break
		}

		pkt.stream_index = outStream.index
		C.av_packet_rescale_ts(pkt, encCtx.time_base, outStream.time_base)

		writeRet := C.av_interleaved_write_frame(ofmtCtx, pkt)
		if writeRet < 0 {
			fmt.Fprintf(os.Stderr, "Error while writing output packet: %s\n", avErr2Str(writeRet))
			ret = writeRet
			break
		}
	}

	if ret == C.get_av_error_eagain() {
		return 0 // Treat EAGAIN as non-fatal for this call.
	}
	return ret
}
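
// cleanup flushes the encoder one last time and then releases every FFmpeg object
// held in the globals (codec contexts, format contexts, the output AVIO handle and
// the hardware device reference). Every step is nil-checked, so it is safe to call
// even when run() returned early.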
func cleanup() {
	var flushRet C.int
	if encCtx != nil {
		fmt.Println("Flushing encoder in cleanup...")
		flushRet = encodeWriteFrame(nil, 0) // Pass NULL to flush.
		if flushRet < 0 && flushRet != C.get_av_error_eof() {
			fmt.Fprintf(os.Stderr, "Error during final encoder flush: %s\n", avErr2Str(flushRet))
			if finalRet == 0 { // Capture the error if no other error occurred.
				finalRet = flushRet
			}
		}
		C.avcodec_free_context(&encCtx)
		encCtx = nil
	}
	if decCtx != nil {
		C.avcodec_free_context(&decCtx)
		decCtx = nil
	}
	if ofmtCtx != nil && (ofmtCtx.oformat.flags&C.AVFMT_NOFILE) == 0 && ofmtCtx.pb != nil {
		C.avio_closep(&ofmtCtx.pb) // avio_closep can also fail; as in the C version, that error is not handled.
	}
	if ofmtCtx != nil {
		C.avformat_free_context(ofmtCtx)
		ofmtCtx = nil
	}
	if ifmtCtx != nil {
		C.avformat_close_input(&ifmtCtx) // As in the C version; this also frees ifmtCtx.
		ifmtCtx = nil
	}
	if hwDeviceCtx != nil {
		C.av_buffer_unref(&hwDeviceCtx)
		hwDeviceCtx = nil
	}
	fmt.Println("Cleaned up resources.")
}
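
// run performs the whole capture session: it opens the ddagrab filter graph through
// the lavfi input device, decodes its rawvideo output, optionally moves frames into
// the encoder's D3D11 frame pool, encodes them and muxes the packets into
// outputFilename. It returns 0 or a negative AVERROR-style code; the FFmpeg objects
// stay in the globals so cleanup() can release them afterwards.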
func run() C.int {
	var frame, transferFrame *C.AVFrame
	var pktIn *C.AVPacket
	// frame, transferFrame and pktIn start out nil, so the deferred frees below are
	// safe even if an allocation fails.

	defer func() {
		if frame != nil {
			C.av_frame_free(&frame)
		}
		if transferFrame != nil {
			C.av_frame_free(&transferFrame)
		}
		if pktIn != nil {
			C.av_packet_free(&pktIn)
		}
	}()

	var hwType C.enum_AVHWDeviceType = C.AV_HWDEVICE_TYPE_NONE
	var encoderNameC *C.char
	var useGPU bool

	finalRet = 0 // Initialize the global final return code.

	if len(os.Args) > 1 && os.Args[1] == "gpu" {
		useGPU = true
		fmt.Println("Using GPU (h264_nvenc) path.")
	} else {
		fmt.Println("Using CPU (libx264) path. Use 'gpu' argument for NVENC.")
	}

	C.av_log_set_level(C.AV_LOG_DEBUG)
	C.avdevice_register_all()

	var filterDescrC *C.char
	if useGPU {
		encoderNameStr := "h264_nvenc"
		encoderNameC = C.CString(encoderNameStr)
		defer C.free(unsafe.Pointer(encoderNameC))

		hwType = C.AV_HWDEVICE_TYPE_D3D11VA
		finalRet = C.av_hwdevice_ctx_create(&hwDeviceCtx, hwType, nil, nil, 0)
		if finalRet < 0 {
			fmt.Fprintf(os.Stderr, "Failed to create D3D11VA HW device: %s\n", avErr2Str(finalRet))
			return finalRet
		}
		fmt.Println("D3D11VA hardware device context created.")
		filterDescrStr := fmt.Sprintf("ddagrab=0:framerate=%d:output_format=d3d11", captureFramerate)
		filterDescrC = C.CString(filterDescrStr)
		defer C.free(unsafe.Pointer(filterDescrC))
	} else {
		encoderNameStr := "libx264"
		encoderNameC = C.CString(encoderNameStr)
		defer C.free(unsafe.Pointer(encoderNameC))
		filterDescrStr := fmt.Sprintf("ddagrab=0:framerate=%d,hwdownload,format=bgra,format=yuv420p", captureFramerate)
		filterDescrC = C.CString(filterDescrStr)
		defer C.free(unsafe.Pointer(filterDescrC))
	}
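
	// Both paths read from the same lavfi/ddagrab graph. The GPU path keeps the
	// captured frames as D3D11 textures (output_format=d3d11) so they can go straight
	// to NVENC, while the CPU path appends hwdownload plus format conversions so that
	// libx264 receives plain yuv420p frames in system memory.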

	lavfiIfmtName := C.CString("lavfi")
	defer C.free(unsafe.Pointer(lavfiIfmtName))
	lavfiIfmt := C.av_find_input_format(lavfiIfmtName)
	if lavfiIfmt == nil {
		fmt.Fprintln(os.Stderr, "Failed to find lavfi input format. FFmpeg build issue?")
		finalRet = C.get_av_error_muxer_not_found()
		return finalRet
	}

	fmt.Printf("Opening input with filter: %s\n", C.GoString(filterDescrC))
	var inputOpts *C.AVDictionary // Starts out nil.
	finalRet = C.avformat_open_input(&ifmtCtx, filterDescrC, lavfiIfmt, &inputOpts)
	C.av_dict_free(&inputOpts) // Free even if avformat_open_input fails, as per the docs/examples.
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Cannot open input with lavfi: '%s': %s\n", C.GoString(filterDescrC), avErr2Str(finalRet))
		return finalRet
	}

	finalRet = C.avformat_find_stream_info(ifmtCtx, nil)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Cannot find stream information: %s\n", avErr2Str(finalRet))
		return finalRet
	}

	videoStreamIndex = -1
	for i := C.uint(0); i < ifmtCtx.nb_streams; i++ {
		streamPtrs := ifmtCtx.streams // **AVStream
		currentStreamPtr := (**C.AVStream)(unsafe.Pointer(uintptr(unsafe.Pointer(streamPtrs)) + uintptr(i)*unsafe.Sizeof(*streamPtrs))) // address of the *AVStream at index i
		currentStream := *currentStreamPtr // the *AVStream at index i

		if currentStream.codecpar.codec_type == C.AVMEDIA_TYPE_VIDEO {
			videoStreamIndex = C.int(i)
			break
		}
	}
	if videoStreamIndex < 0 {
		fmt.Fprintln(os.Stderr, "No video stream found in lavfi input")
		finalRet = C.get_av_error_stream_not_found()
		return finalRet
	}

	inputStreamPtrs := ifmtCtx.streams
	inputStreamPtr := (**C.AVStream)(unsafe.Pointer(uintptr(unsafe.Pointer(inputStreamPtrs)) + uintptr(videoStreamIndex)*unsafe.Sizeof(*inputStreamPtrs)))
	inputStream := *inputStreamPtr
	inCodecpar := inputStream.codecpar

	fmt.Printf("Input video from lavfi: %dx%d, format: %s (%d), codec_id: %s (%d), time_base: %d/%d\n",
		inCodecpar.width, inCodecpar.height,
		C.GoString(C.av_get_pix_fmt_name(C.enum_AVPixelFormat(inCodecpar.format))), C.enum_AVPixelFormat(inCodecpar.format),
		C.GoString(C.avcodec_get_name(inCodecpar.codec_id)), inCodecpar.codec_id,
		inputStream.time_base.num, inputStream.time_base.den)

	decoder := C.avcodec_find_decoder(inCodecpar.codec_id)
	if decoder == nil {
		fmt.Fprintf(os.Stderr, "Failed to find decoder for %s (expected rawvideo)\n", C.GoString(C.avcodec_get_name(inCodecpar.codec_id)))
		finalRet = C.get_av_error_decoder_not_found()
		return finalRet
	}
	decCtx = C.avcodec_alloc_context3(decoder)
	if decCtx == nil {
		finalRet = C.get_av_error_enomem()
		return finalRet
	}
	finalRet = C.avcodec_parameters_to_context(decCtx, inCodecpar)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Failed to copy codec parameters to decoder context: %s\n", avErr2Str(finalRet))
		return finalRet
	}
	decCtx.framerate = C.av_make_q(C.int(captureFramerate), 1)
	decCtx.time_base = inputStream.time_base

	if useGPU && inCodecpar.format == C.AV_PIX_FMT_D3D11 {
		decCtx.hw_device_ctx = C.av_buffer_ref(hwDeviceCtx)
		if decCtx.hw_device_ctx == nil {
			finalRet = C.get_av_error_enomem()
			return finalRet
		}
		fmt.Println("Rawvideo decoder configured with D3D11VA device context for D3D11 input.")
	}

	finalRet = C.avcodec_open2(decCtx, decoder, nil)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Failed to open lavfi output decoder: %s\n", avErr2Str(finalRet))
		return finalRet
	}
	fmt.Printf("Decoder for lavfi output (%s) opened.\n", C.GoString(decoder.name))

	outputFilenameC := C.CString(outputFilename)
	defer C.free(unsafe.Pointer(outputFilenameC))
	finalRet = C.avformat_alloc_output_context2(&ofmtCtx, nil, nil, outputFilenameC)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Could not create output context: %s\n", avErr2Str(finalRet))
		return finalRet
	}

	encoder := C.avcodec_find_encoder_by_name(encoderNameC)
	if encoder == nil {
		fmt.Fprintf(os.Stderr, "Encoder %s not found\n", C.GoString(encoderNameC))
		finalRet = C.get_av_error_invaliddata()
		return finalRet
	}
	encCtx = C.avcodec_alloc_context3(encoder)
	if encCtx == nil {
		fmt.Fprintln(os.Stderr, "Failed to allocate encoder context")
		finalRet = C.get_av_error_enomem()
		return finalRet
	}

	encCtx.height = decCtx.height
	encCtx.width = decCtx.width
	if decCtx.sample_aspect_ratio.num != 0 {
		encCtx.sample_aspect_ratio = decCtx.sample_aspect_ratio
	} else {
		encCtx.sample_aspect_ratio = C.AVRational{num: 1, den: 1}
	}
	encCtx.time_base = C.av_make_q(1, C.int(captureFramerate))
	encCtx.framerate = C.av_make_q(C.int(captureFramerate), 1)

	if useGPU {
		encCtx.pix_fmt = C.AV_PIX_FMT_D3D11
		encCtx.hw_device_ctx = C.av_buffer_ref(hwDeviceCtx)
		if encCtx.hw_device_ctx == nil {
			finalRet = C.get_av_error_enomem()
			return finalRet
		}
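
		// The encoder is opened with AV_PIX_FMT_D3D11, so it is also given its own D3D11
		// AVHWFramesContext describing the frame pool it expects: D3D11 textures whose
		// underlying software format is NV12. Decoded ddagrab frames that belong to a
		// different frames context are copied into this pool before encoding (see the
		// transfer in the main loop).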
		encHwFramesRef := C.av_hwframe_ctx_alloc(hwDeviceCtx)
		if encHwFramesRef == nil {
			finalRet = C.get_av_error_enomem()
			return finalRet
		}
		encFramesCtxData := (*C.AVHWFramesContext)(unsafe.Pointer(encHwFramesRef.data))
		encFramesCtxData.format = C.AV_PIX_FMT_D3D11
		encFramesCtxData.sw_format = C.AV_PIX_FMT_NV12
		encFramesCtxData.width = encCtx.width
		encFramesCtxData.height = encCtx.height
		encFramesCtxData.initial_pool_size = 20
		initRet := C.av_hwframe_ctx_init(encHwFramesRef)
		if initRet < 0 {
			fmt.Fprintf(os.Stderr, "Failed to initialize encoder HW frames context: %s\n", avErr2Str(initRet))
			C.av_buffer_unref(&encHwFramesRef)
			finalRet = initRet
			return finalRet
		}
		encCtx.hw_frames_ctx = encHwFramesRef

		optNameCQ := C.CString("cq")
		defer C.free(unsafe.Pointer(optNameCQ))
		C.av_opt_set_int(encCtx.priv_data, optNameCQ, 20, 0)
	} else {
		encCtx.pix_fmt = C.AV_PIX_FMT_YUV420P
		optNameCRF := C.CString("crf")
		optValCRF := C.CString("20")
		defer C.free(unsafe.Pointer(optNameCRF))
		defer C.free(unsafe.Pointer(optValCRF))
		C.av_opt_set(encCtx.priv_data, optNameCRF, optValCRF, 0)

		optNamePreset := C.CString("preset")
		optValPreset := C.CString("ultrafast")
		defer C.free(unsafe.Pointer(optNamePreset))
		defer C.free(unsafe.Pointer(optValPreset))
		C.av_opt_set(encCtx.priv_data, optNamePreset, optValPreset, 0)

		optNameTune := C.CString("tune")
		optValTune := C.CString("zerolatency")
		defer C.free(unsafe.Pointer(optNameTune))
		defer C.free(unsafe.Pointer(optValTune))
		C.av_opt_set(encCtx.priv_data, optNameTune, optValTune, 0)
	}
	encCtx.gop_size = C.int(captureFramerate * 2)
	if useGPU {
		encCtx.max_b_frames = 2
	} else {
		encCtx.max_b_frames = 0
	}

	if (ofmtCtx.oformat.flags & C.AVFMT_GLOBALHEADER) != 0 {
		encCtx.flags |= C.AV_CODEC_FLAG_GLOBAL_HEADER
	}
	finalRet = C.avcodec_open2(encCtx, encoder, nil)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Cannot open video encoder '%s': %s\n", C.GoString(encoder.name), avErr2Str(finalRet))
		return finalRet
	}
	fmt.Printf("Encoder %s opened.\n", C.GoString(encoderNameC))

	outStream = C.avformat_new_stream(ofmtCtx, encoder)
	if outStream == nil {
		fmt.Fprintln(os.Stderr, "Failed allocating output stream")
		finalRet = C.get_av_error_unknown()
		return finalRet
	}
	outStream.time_base = encCtx.time_base
	finalRet = C.avcodec_parameters_from_context(outStream.codecpar, encCtx)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Failed to copy codec parameters to output stream: %s\n", avErr2Str(finalRet))
		return finalRet
	}

	if (ofmtCtx.oformat.flags & C.AVFMT_NOFILE) == 0 {
		finalRet = C.avio_open(&ofmtCtx.pb, outputFilenameC, C.AVIO_FLAG_WRITE)
		if finalRet < 0 {
			fmt.Fprintf(os.Stderr, "Could not open output '%s': %s\n", outputFilename, avErr2Str(finalRet))
			return finalRet
		}
	}
	finalRet = C.avformat_write_header(ofmtCtx, nil)
	if finalRet < 0 {
		fmt.Fprintf(os.Stderr, "Error writing header: %s\n", avErr2Str(finalRet))
		return finalRet
	}
	fmt.Printf("Output file header written to %s.\n", outputFilename)

	frame = C.av_frame_alloc()
	if useGPU {
		transferFrame = C.av_frame_alloc()
	}
	pktIn = C.av_packet_alloc()
	if frame == nil || (useGPU && transferFrame == nil) || pktIn == nil {
		fmt.Fprintln(os.Stderr, "Could not allocate frame/packet")
		finalRet = C.get_av_error_enomem()
		return finalRet
	}

	fmt.Printf("Starting capture (max ~%d s)...\n", captureDurationSec)
	activeProcessing := true
	var readFrameRet C.int
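
	// Main capture loop: read a packet from the lavfi graph, adjust its PTS so the
	// sequence stays monotonic, decode it, optionally move the resulting D3D11 frame
	// into the encoder's frame pool, then encode and mux it. The loop stops after
	// roughly captureDurationSec seconds of frames or on the first fatal error.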
processingLoop:
	for activeProcessing {
		readFrameRet = C.av_read_frame(ifmtCtx, pktIn)
		if readFrameRet < 0 {
			if readFrameRet == C.get_av_error_eof() {
				fmt.Println("av_read_frame: End of file from lavfi normally.")
			} else {
				fmt.Fprintf(os.Stderr, "av_read_frame: Error reading packet from lavfi: %s\n", avErr2Str(readFrameRet))
			}
			finalRet = readFrameRet
			activeProcessing = false
		}

		if activeProcessing && pktIn.stream_index == videoStreamIndex {
			// fmt.Printf("Read packet from stream %d, PTS %d, DTS %d\n", pktIn.stream_index, pktIn.pts, pktIn.dts)
			if pktIn.pts != C.get_av_nopts_value() {
				if lastInputPts != C.get_av_nopts_value() && pktIn.pts <= lastInputPts {
					fmt.Printf("Warning: Non-monotonic input PTS detected (%d -> %d). Adjusting offset.\n", lastInputPts, pktIn.pts)
					ptsOffset += (lastInputPts - pktIn.pts + 1)
				}
				pktIn.pts += ptsOffset
				lastInputPts = pktIn.pts
			} else {
				fmt.Println("Warning: Input packet has no PTS.")
			}
			if pktIn.dts == C.get_av_nopts_value() && pktIn.pts != C.get_av_nopts_value() {
				pktIn.dts = pktIn.pts
			}

			sendRet := C.avcodec_send_packet(decCtx, pktIn)
			if sendRet < 0 {
				fmt.Fprintf(os.Stderr, "Error sending packet to lavfi output decoder: %s\n", avErr2Str(sendRet))
				finalRet = sendRet
				activeProcessing = false
			}

			for activeProcessing && sendRet >= 0 {
				C.av_frame_unref(frame)
				if useGPU {
					C.av_frame_unref(transferFrame)
				}

				receiveRet := C.avcodec_receive_frame(decCtx, frame)
				if receiveRet == C.get_av_error_eagain() {
					break
				} else if receiveRet == C.get_av_error_eof() {
					fmt.Println("Decoder EOF reached while receiving frames.")
					activeProcessing = false
					break
				} else if receiveRet < 0 {
					fmt.Fprintf(os.Stderr, "Error receiving frame from lavfi output decoder: %s\n", avErr2Str(receiveRet))
					finalRet = receiveRet
					activeProcessing = false
					break
				}
				// Debug: uncomment to log each decoded frame's format and PTS.
				// fmt.Printf("Received frame from decoder, format %s, PTS %d\n", C.GoString(C.av_get_pix_fmt_name(C.enum_AVPixelFormat(frame.format))), frame.pts)

				if frame.pts != C.get_av_nopts_value() {
					frame.pts = C.av_rescale_q(frame.pts, decCtx.time_base, encCtx.time_base)
				} else {
					frame.pts = C.av_rescale_q(encodedFrameCount, C.AVRational{num: 1, den: C.int(captureFramerate)}, encCtx.time_base)
					fmt.Printf("Generated fallback PTS %d for encoder.\n", frame.pts)
				}

				frameToEncode := frame
				if useGPU {
					if C.enum_AVPixelFormat(frame.format) != C.AV_PIX_FMT_D3D11 {
						fmt.Fprintf(os.Stderr, "Decoded frame is %s, not AV_PIX_FMT_D3D11 for GPU path!\n", C.GoString(C.av_get_pix_fmt_name(C.enum_AVPixelFormat(frame.format))))
						finalRet = C.get_av_error_invaliddata()
						activeProcessing = false
						break
					}
					if frame.hw_frames_ctx == nil {
						fmt.Fprintln(os.Stderr, "Decoded D3D11 frame is missing its own hw_frames_ctx!")
						finalRet = C.get_av_error_invaliddata()
						activeProcessing = false
						break
					}
					if encCtx.hw_frames_ctx == nil {
						fmt.Fprintln(os.Stderr, "Encoder hw_frames_ctx is NULL.")
						finalRet = C.get_av_error_invaliddata()
						activeProcessing = false
						break
					}
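
					// The decoded frame carries its own hw_frames_ctx (typically the one created
					// by the ddagrab input). If it differs from the pool the encoder was opened
					// with, grab a free frame from the encoder's pool and copy the texture into
					// it with av_hwframe_get_buffer + av_hwframe_transfer_data before encoding.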
					if frame.hw_frames_ctx.data != encCtx.hw_frames_ctx.data {
						printHwTransferMessageOnce()
						if C.av_hwframe_get_buffer(encCtx.hw_frames_ctx, transferFrame, 0) < 0 {
							fmt.Fprintln(os.Stderr, "Failed to get buffer from encoder's hw_frames_ctx for transfer")
							finalRet = C.get_av_error_unknown()
							activeProcessing = false
							break
						}
						if C.av_hwframe_transfer_data(transferFrame, frame, 0) < 0 {
							fmt.Fprintln(os.Stderr, "Failed to transfer HW frame data")
							finalRet = C.get_av_error_unknown()
							activeProcessing = false
							break
						}
						transferFrame.pts = frame.pts
						transferFrame.sample_aspect_ratio = frame.sample_aspect_ratio
						frameToEncode = transferFrame
					}
				}

				frameToEncode.pict_type = C.AV_PICTURE_TYPE_NONE
				encodeRet := encodeWriteFrame(frameToEncode, 0)
				if encodeRet < 0 && encodeRet != C.get_av_error_eof() {
					fmt.Fprintln(os.Stderr, "encode_write_frame failed.")
					finalRet = encodeRet
					activeProcessing = false
				}

				if activeProcessing {
					encodedFrameCount++
				}

				if encodedFrameCount >= C.int64_t(captureFramerate*captureDurationSec) {
					fmt.Printf("Reached ~%ds capture limit (%d frames).\n", captureDurationSec, encodedFrameCount)
					activeProcessing = false
					if finalRet == C.get_av_error_eof() || finalRet == 0 {
						finalRet = 0
					}
					break processingLoop
				}
				if !activeProcessing {
					break
				}
			}
		}

		if pktIn.data != nil {
			C.av_packet_unref(pktIn)
		}

		if readFrameRet == C.get_av_error_eof() && activeProcessing {
			activeProcessing = false
		}

		if !activeProcessing && finalRet < 0 && finalRet != C.get_av_error_eof() {
			break processingLoop
		}
	}

	fmt.Printf("Main processing loop finished. Encoded %d frames. Final_ret before flush: %d (%s)\n",
		encodedFrameCount, finalRet, avErr2Str(finalRet))
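
	// Drain the decoder: send a NULL packet, then keep receiving frames (encoding each
	// one exactly as in the main loop) until it reports EOF. The encoder itself is
	// flushed later, in cleanup().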
	if decCtx != nil {
		fmt.Println("Flushing lavfi output decoder (sending NULL packet)...")
		sendRet := C.avcodec_send_packet(decCtx, nil)
		if sendRet < 0 && sendRet != C.get_av_error_eof() {
			fmt.Fprintf(os.Stderr, "Error sending NULL packet to flush decoder: %s\n", avErr2Str(sendRet))
			if finalRet == 0 || finalRet == C.get_av_error_eof() {
				finalRet = sendRet
			}
		}

		for {
			C.av_frame_unref(frame)
			if useGPU {
				C.av_frame_unref(transferFrame)
			}

			receiveRet := C.avcodec_receive_frame(decCtx, frame)
			if receiveRet == C.get_av_error_eof() || receiveRet == C.get_av_error_eagain() {
				if receiveRet == C.get_av_error_eof() {
					fmt.Println("Decoder fully flushed.")
				}
				break
			}
			if receiveRet < 0 {
				fmt.Fprintf(os.Stderr, "Error flushing lavfi output decoder (receiving frame): %s\n", avErr2Str(receiveRet))
				if finalRet == 0 || finalRet == C.get_av_error_eof() {
					finalRet = receiveRet
				}
				break
			}
			// fmt.Printf("Received flushed frame from decoder, PTS %d\n", frame.pts)

			if frame.pts != C.get_av_nopts_value() {
				frame.pts = C.av_rescale_q(frame.pts, decCtx.time_base, encCtx.time_base)
			} else {
				frame.pts = C.av_rescale_q(encodedFrameCount, C.AVRational{num: 1, den: C.int(captureFramerate)}, encCtx.time_base)
			}

			frameToEncode := frame
			if useGPU {
				if C.enum_AVPixelFormat(frame.format) == C.AV_PIX_FMT_D3D11 && frame.hw_frames_ctx != nil && encCtx.hw_frames_ctx != nil &&
					frame.hw_frames_ctx.data != encCtx.hw_frames_ctx.data {
					printHwTransferMessageOnce()
					if C.av_hwframe_get_buffer(encCtx.hw_frames_ctx, transferFrame, 0) == 0 &&
						C.av_hwframe_transfer_data(transferFrame, frame, 0) == 0 {
						transferFrame.pts = frame.pts
						transferFrame.sample_aspect_ratio = frame.sample_aspect_ratio
						frameToEncode = transferFrame
					} else {
						fmt.Fprintln(os.Stderr, "HW transfer for flushed frame failed. Skipping this frame.")
						continue
					}
				} else if C.enum_AVPixelFormat(frame.format) != C.AV_PIX_FMT_D3D11 && encCtx.pix_fmt == C.AV_PIX_FMT_D3D11 {
					fmt.Fprintf(os.Stderr, "Flushed frame is %s, but encoder expects D3D11. Skipping.\n", C.GoString(C.av_get_pix_fmt_name(C.enum_AVPixelFormat(frame.format))))
					continue
				}
			}
			frameToEncode.pict_type = C.AV_PICTURE_TYPE_NONE
			encodeRet := encodeWriteFrame(frameToEncode, 0)
			if encodeRet < 0 && encodeRet != C.get_av_error_eof() {
				fmt.Fprintln(os.Stderr, "encode_write_frame failed for flushed decoder frame.")
				if finalRet == 0 || finalRet == C.get_av_error_eof() {
					finalRet = encodeRet
				}
			}
			if encodeRet >= 0 {
				encodedFrameCount++
			}
		}
	}
	fmt.Printf("Lavfi output decoder flushing complete. Total encoded frames: %d\n", encodedFrameCount)

	fmt.Printf("Encoder will be flushed during cleanup. Final_ret before trailer: %d (%s)\n", finalRet, avErr2Str(finalRet))

	if ofmtCtx != nil && ofmtCtx.pb != nil {
		trailerRet := C.av_write_trailer(ofmtCtx)
		if trailerRet < 0 {
			fmt.Fprintf(os.Stderr, "Error writing output trailer: %s\n", avErr2Str(trailerRet))
			if finalRet == 0 || finalRet == C.get_av_error_eof() {
				finalRet = trailerRet
			}
		} else {
			fmt.Println("Output file trailer written.")
		}
	} else {
		if ofmtCtx != nil && (ofmtCtx.oformat.flags&C.AVFMT_NOFILE) == 0 {
			fmt.Println("Output format context or pb was null when expected, skipping trailer.")
			if finalRet == 0 {
				finalRet = C.get_av_error_unknown()
			}
		}
	}
	fmt.Printf("Encoding complete. Final_ret before cleanup: %d (%s)\n", finalRet, avErr2Str(finalRet))

	return finalRet
}

func main() {
	_ = run()
	cleanup()

	exitCode := 0
	if finalRet < 0 && finalRet != C.get_av_error_eof() {
		exitCode = 1
	}

	errorString := "Success"
	if finalRet == C.get_av_error_eof() {
		errorString = "End of file"
	} else if finalRet < 0 {
		errorString = avErr2Str(finalRet)
	}

	fmt.Printf("Exiting with code: %d (%s)\n", finalRet, errorString)
	os.Exit(exitCode)
}