Skip to content

Instantly share code, notes, and snippets.

@hzeller
Created May 12, 2017 01:29
Show Gist options
  • Save hzeller/524310b05065bd3e1aa41a8e26574815 to your computer and use it in GitHub Desktop.
// -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-
//
// Quick hack based on ffmpeg
// tutorial http://dranger.com/ffmpeg/tutorial01.html
// in turn based on a tutorial by
// Martin Bohme ([email protected])
// Build with
// g++ -Wall -O3 -g video-viewer.o -o video-viewer -L../lib -lrgbmatrix -lrt -lm -lpthread `pkg-config --cflags --libs libavcodec libavformat libswscale libavutil`
// Ancient AV versions forgot to set this.
#define __STDC_CONSTANT_MACROS
#define _XOPEN_SOURCE 500
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <getopt.h>
#include <signal.h>
#include <unistd.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#include "led-matrix-c.h"
// Set by the signal handler when SIGINT/SIGTERM arrives; polled by the
// main decode loop. sig_atomic_t is the only integer type the C standard
// guarantees can be written safely from a signal handler (C11 5.1.2.3);
// 'volatile char' was not portable.
volatile sig_atomic_t interrupt_received = 0;

// Async-signal-safe handler: does nothing but set the flag.
static void InterruptHandler(int signo) {
    (void)signo;  // Same action for every signal we register.
    interrupt_received = 1;
}
// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
# define av_frame_alloc avcodec_alloc_frame
# define av_frame_free avcodec_free_frame
#endif
// One 8-bit-per-channel pixel, matching the packed 3-byte layout of an
// AV_PIX_FMT_RGB24 scanline. FillFrame() casts raw frame bytes to this
// struct, so it must stay exactly 3 bytes with no padding between fields
// (true on common ABIs for three uint8_t members).
struct LedPixel {
uint8_t r, g, b;
};
// Copy one decoded RGB24 frame onto the LED canvas, pixel by pixel.
// 'pFrame' must already be scaled to the canvas dimensions (the sws_scale
// call in main() targets canvas_width x canvas_height).
void FillFrame(AVFrame *pFrame, struct LedCanvas *canvas) {
    int width, height;
    led_canvas_get_size(canvas, &width, &height);
    for (int y = 0; y < height; ++y) {
        // linesize[0] is the stride in bytes; it may exceed width*3.
        const struct LedPixel *row =
            (const struct LedPixel *)(pFrame->data[0] + y * pFrame->linesize[0]);
        for (int x = 0; x < width; ++x) {
            const struct LedPixel px = row[x];
            led_canvas_set_pixel(canvas, x, y, px.r, px.g, px.b);
        }
    }
}
// Print usage help plus the matrix library's flag documentation to stderr.
// Always returns 1 so call sites can write 'return usage(argv[0]);'.
static int usage(const char *progname) {
    fprintf(stderr,
            "usage: %s [options] <video>\n"
            "Options:\n"
            "\t-v : verbose.\n",
            progname);
    led_matrix_print_flags(stderr);
    return 1;
}
// Decode <video> and stream its frames to the LED matrix at the video's
// native frame rate. Returns 0 on success, non-zero on any setup failure.
//
// Fixes vs. the original:
//  - fps guard: avg_frame_rate of 0/x yields fps==0 and 0/0 yields NaN;
//    the old 'fps < 0' test missed both, making '1e6 / fps' undefined.
//  - sws_ctx and the avcodec_alloc_context3() context were leaked.
//  - pFrame and the RGB buffer allocations were unchecked.
int main(int argc, char *argv[]) {
    struct RGBLedMatrixOptions options;
    memset(&options, 0, sizeof(options));

    // Consumes the library's own --led-* flags from argc/argv.
    struct RGBLedMatrix *matrix = led_matrix_create_from_options(&options,
                                                                 &argc, &argv);
    if (matrix == NULL) {
        usage(argv[0]);
        return 1;
    }
    struct LedCanvas *canvas = led_matrix_create_offscreen_canvas(matrix);
    int canvas_width, canvas_height;
    led_canvas_get_size(canvas, &canvas_width, &canvas_height);

    char verbose = 0;
    int opt;
    while ((opt = getopt(argc, argv, "v")) != -1) {
        switch (opt) {
        case 'v':
            verbose = 1;
            break;
        default:
            return usage(argv[0]);
        }
    }
    if (optind >= argc) {
        fprintf(stderr, "Expected image filename.\n");
        return usage(argv[0]);
    }

    // Initializing these to NULL prevents segfaults!
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtxOrig = NULL;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameRGB = NULL;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    uint8_t *buffer = NULL;
    struct SwsContext *sws_ctx = NULL;

    const char *movie_file = argv[optind];

    // Register all formats and codecs (required in this FFmpeg era).
    av_register_all();

    // Open video file
    if (avformat_open_input(&pFormatCtx, movie_file, NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    if (verbose) {
        av_dump_format(pFormatCtx, 0, movie_file, 0);
    }

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < (int)pFormatCtx->nb_streams; ++i) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream. This one is
    // owned by the format context; do not free it ourselves.
    pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;

    // Determine playback rate. avg_frame_rate may be 0/x (-> 0.0) or 0/0
    // (-> NaN) for some containers; '!(fps > 0)' catches both, unlike a
    // plain '< 0' comparison. Fall back to the codec time base, then to a
    // fixed default, so the frame-delay division below is always defined.
    double fps = av_q2d(pFormatCtx->streams[videoStream]->avg_frame_rate);
    if (!(fps > 0)) {
        fps = 1.0 / av_q2d(pFormatCtx->streams[videoStream]->codec->time_base);
    }
    if (!(fps > 0)) {
        fps = 30.0;  // Last-resort default frame rate.
    }
    if (verbose) fprintf(stderr, "FPS: %f\n", fps);

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Copy context: we must not avcodec_open2() the demuxer's own context.
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1; // Error copying codec context
    }
    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        return -1; // Could not open codec

    // Allocate the decode frame and the RGB conversion frame.
    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();
    if (pFrame == NULL || pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    if (buffer == NULL)
        return -1; // Out of memory

    // Assign appropriate parts of buffer to image planes in pFrameRGB.
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture.
    avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // Software scaler: native size/format -> canvas-sized RGB24.
    sws_ctx = sws_getContext(pCodecCtx->width,
                             pCodecCtx->height,
                             pCodecCtx->pix_fmt,
                             canvas_width, canvas_height,
                             AV_PIX_FMT_RGB24,
                             SWS_BILINEAR,
                             NULL, NULL, NULL);
    if (sws_ctx == 0) {
        fprintf(stderr, "Trouble doing scaling to %dx%d :(\n",
                canvas_width, canvas_height);
        return 1;
    }

    signal(SIGTERM, InterruptHandler);
    signal(SIGINT, InterruptHandler);

    // Read packets, decode, scale and display until EOF or interrupt.
    const int frame_wait_micros = 1e6 / fps;
    while (!interrupt_received && av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a complete video frame?
            if (frameFinished) {
                // Convert the image from its native format to RGB
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                FillFrame(pFrameRGB, canvas);
                canvas = led_matrix_swap_on_vsync(matrix, canvas);
            }
            usleep(frame_wait_micros);
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    if (interrupt_received) {
        // Feedback for Ctrl-C, but most importantly, force a newline
        // at the output, so that commandline-shell editing is not messed up.
        fprintf(stderr, "Got interrupt. Exiting\n");
    }

    led_matrix_delete(matrix);

    // Free the scaler context (was leaked before).
    sws_freeContext(sws_ctx);
    // Free the RGB image
    av_free(buffer);
    av_frame_free(&pFrameRGB);
    // Free the YUV frame
    av_frame_free(&pFrame);
    // Close and free our copied codec context (avcodec_free_context closes
    // it; a bare avcodec_close leaked the allocation).
    avcodec_free_context(&pCodecCtx);
    avcodec_close(pCodecCtxOrig);
    // Close the video file
    avformat_close_input(&pFormatCtx);
    return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment