Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save grigorisg9gr/81305c880b8bb142fae429144eb83e93 to your computer and use it in GitHub Desktop.
300 Videos in the Wild evaluation script
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# grigoris, 9/2016: Evaluate the points from the latest groundtruth points.\n",
"# This script can be used to reproduce the results of \n",
"# the experiment 4.8 (300vw competition) for the paper https://arxiv.org/abs/1603.06015"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# p_base is the main folder where the annotations are located into.\n",
"p_base = '/base_path/'\n",
"\n",
"# submission folder name\n",
"f_sub = 'submission.001'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"```\n",
"(expected folder structure)\n",
"path_base\n",
"\n",
" └───category 1\n",
"\n",
" └───[name_of_clip] (e.g. 541)\n",
" |\n",
" └───[gt_folder_name] (annot)\n",
" |\n",
" └───[frame_name].[extension] (e.g. 000001.pts)\n",
" │ ...\n",
" \n",
" |\n",
" └───[submission_folder_name] (e.g. folder_i_name)\n",
" |\n",
" └───[frame_name].[extension] (e.g. 000001.pts)\n",
" │ ...\n",
" │ ...\n",
" │ ...\n",
"\n",
" └───category 2\n",
"\n",
" └───[name_of_clip]\n",
" │ [file_name].[extension] (e.g. 000001.pts)\n",
" │ ...\n",
" │ ...\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import matplotlib as mpl\n",
"from os.path import isfile, join, isdir, exists\n",
"from os import listdir \n",
"from glob import glob\n",
"import itertools\n",
"import numpy as np\n",
"from scipy.io import savemat\n",
"\n",
"try: \n",
" %matplotlib inline\n",
"except NameError:\n",
" pass"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from research_pyutils import mkdir_p\n",
"\n",
"from menpo.io import (import_pickle, export_pickle, \n",
" import_landmark_files, import_landmark_file)\n",
"\n",
"\n",
"from menpo.landmark import face_ibug_68_to_face_ibug_49 as f_49\n",
"from menpo.landmark import face_ibug_68_to_face_ibug_51 as f_51\n",
"\n",
"from menpofit.error.stats import compute_cumulative_error\n",
"from menpofit.visualize import plot_cumulative_error_distribution"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# # # visualisation options\n",
"error_range = [0., 0.081, 0.005]\n",
"line_style = ['-', '-', '-', '-', '-'] \n",
"colours = ['r', 'g', 'b', 'c', 'm', 'y','k']\n",
"marker_style = ['s', None, 'o', '>', 'v', 'x','d']\n",
"font_size = 24\n",
"linewidth = 3\n",
"grid_line_width = 1\n",
"marker_size = 14\n",
"render_legend = False\n",
"# cost for missing file is the error appended in case \n",
"# of a missing file. Practically, this should be \n",
"# infinite, but actually any value greater than 1 should do.\n",
"cost_for_missing_file = 10000\n",
"\n",
"# # # some additional options for error/landmarks:\n",
"# the normalisation to be used.\n",
"norm = 'diag'\n",
"# the landmarks we wish to calculate the error on.\n",
"pts = 68"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Functions for frame error computation"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def convert_to_proper_markup(ln, pts):\n",
"    \"\"\"Convert a landmark group to the requested markup.\n",
"\n",
"    Returns `ln` unchanged if it already has `pts` landmarks; otherwise\n",
"    converts 68-point annotations to the 49- or 51-point markup.\n",
"    Raises ValueError for any unsupported conversion.\n",
"    \"\"\"\n",
"    if ln.n_landmarks == pts:\n",
"        # if they match the expected pts, return those.\n",
"        return ln\n",
"    if pts == 49 and ln.n_landmarks == 68:\n",
"        ln = f_49(ln)\n",
"    elif pts == 51 and ln.n_landmarks == 68:\n",
"        ln = f_51(ln)\n",
"    else:\n",
"        # note the trailing space: the two literals are concatenated.\n",
"        m1 = ('Not recognised conversion type with pts_expected '\n",
"              '{} and encountered {} points.')\n",
"        raise ValueError(m1.format(pts, ln.n_landmarks))\n",
"    return ln\n",
"\n",
"\n",
"def np_length(norm_shape, axis=0):\n",
"    # Per-axis extent (max - min) of the point cloud.\n",
"    return np.max(norm_shape, axis=axis) - np.min(norm_shape, axis=axis)\n",
"\n",
"\n",
"def compute_point_to_point_error(shape, gt_shape):\n",
"    # Mean euclidean distance between corresponding points.\n",
"    return np.mean(np.sqrt(np.sum((shape - gt_shape) ** 2, axis=-1)))\n",
"\n",
"\n",
"def compute_normaliser(norm_shape, pts, option='mean'):\n",
"    \"\"\"\n",
"    Compute the normaliser for a point to point error.\n",
"    Option:\n",
"        mean    mean of the length + width of the norm_shape points.\n",
"        inter   interocular distance.\n",
"        diag    diagonal of the bounding box (edge points).\n",
"    \"\"\"\n",
"    if option == 'mean':\n",
"        return np.mean(np_length(norm_shape))\n",
"    if option == 'diag':\n",
"        return np.linalg.norm(np_length(norm_shape))\n",
"    if option == 'inter':\n",
"        # outer eye-corner indices depend on the markup in use.\n",
"        if pts == 68:\n",
"            return np.linalg.norm(norm_shape[36, :] - norm_shape[45, :])\n",
"        return np.linalg.norm(norm_shape[19, :] - norm_shape[28, :])\n",
"    # previously any unrecognised option silently fell through to 'inter';\n",
"    # fail loudly instead so a typo in `norm` cannot skew the results.\n",
"    raise ValueError('Unknown normalisation option: {}.'.format(option))"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"# Aux functions for category, plotting"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def compute_errors_category(files_gt, f_sub, pts, \n",
"                            norm, cost_for_missing_file):\n",
"    \"\"\"Compute the normalised point-to-point errors for one category.\n",
"\n",
"    For every ground-truth file, the matching submission file is searched\n",
"    in the sibling `f_sub` folder; a missing (or empty) submission file\n",
"    contributes `cost_for_missing_file` as its error.\n",
"    \"\"\"\n",
"    errors = []\n",
"    # iterate over the gt ones, import them and\n",
"    # search for the respective submission one.\n",
"    for fl in files_gt:\n",
"        ln_gt = import_landmark_file(fl)\n",
"        # try to find the submission filename.\n",
"        lp = ln_gt.path\n",
"        lfn = glob(join(lp.parents[1].as_posix(), f_sub, lp.stem + '*.pts'))\n",
"        if len(lfn) == 0:\n",
"            errors.append(cost_for_missing_file)\n",
"            continue\n",
"        if len(lfn) > 1:\n",
"            m3 = ('More landmarks found with paths: {}. \\nOnly '\n",
"                  'the first one will be loaded in this script.')\n",
"            # bug fix: the paths were never substituted into the template.\n",
"            print(m3.format(lfn))\n",
"        ln_sub = import_landmark_file(lfn[0])\n",
"        if ln_sub.lms.n_points == 0:\n",
"            # corner case of empty landmark file in the submissions.\n",
"            errors.append(cost_for_missing_file)\n",
"            continue\n",
"        # first convert both to the proper markup.\n",
"        ln_01 = convert_to_proper_markup(ln_gt, pts)\n",
"        ln_11 = convert_to_proper_markup(ln_sub, pts)\n",
"        assert ln_01.n_landmarks == ln_11.n_landmarks\n",
"        # compute the normaliser based on the first landmark (gt).\n",
"        normaliser = compute_normaliser(ln_01.lms.points, pts, norm)\n",
"        err = compute_point_to_point_error(ln_11.lms.points, ln_01.lms.points)\n",
"        errors.append(err / normaliser)\n",
"\n",
"    return np.array(errors)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def save_error_plot(errors, name, p_s):\n",
"    \"\"\"Plot the cumulative error distribution and (optionally) export it.\n",
"\n",
"    Uses the module-level visualisation options; `name` and `p_s` are only\n",
"    used by the (commented-out) export calls below.\n",
"    \"\"\"\n",
"    global colours, marker_style, line_style\n",
"    # one visual style per curve, unless there are too many curves.\n",
"    n_styles = len(errors) if len(errors) < 15 else 1\n",
"    r = plot_cumulative_error_distribution(\n",
"        errors, error_range=error_range, new_figure=True,\n",
"        line_colour=colours[:n_styles], marker_edge_colour=colours[:n_styles],\n",
"        render_legend=render_legend, axes_font_size=font_size,\n",
"        line_width=linewidth, marker_size=marker_size,\n",
"        marker_edge_width=linewidth, grid_line_width=grid_line_width,\n",
"        marker_style=marker_style[:n_styles], line_style=line_style[:n_styles],\n",
"        figure_size=(14, 14))\n",
"    # uncomment below if you want to export it\n",
"#     r.save_figure(p_s + name + '.png', format='png', pad_inches=0.0, overwrite=True)\n",
"#     r.save_figure(p_s + name + '.eps', format='eps', pad_inches=0.0, overwrite=True)\n",
"    return r"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Main processing"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"m4 = ('All errors seem as that of missing file, '\n",
"      'please ensure that \\nthe paths are correct '\n",
"      'and the files as expected.')\n",
"\n",
"# process each of the three 300-VW test categories.\n",
"for n_c in range(1, 4):\n",
"    cname = 'category{}'.format(n_c)\n",
"    print('{} being processed.'.format(cname))\n",
"    # bug fix: the category folder name was missing from the join\n",
"    # (it read `join(p_base, , '')`, a syntax error).\n",
"    p_cat = join(p_base, cname, '')\n",
"    assert isdir(p_cat), 'The category {} does not exist.'.format(n_c)\n",
"    # search the gt files\n",
"    files_gt = sorted(glob(join(p_cat, '*', 'annot', '*.pts')))\n",
"    errors = compute_errors_category(files_gt, f_sub, pts, \n",
"                                     norm, cost_for_missing_file)\n",
"    # sanity check that some errors are 'sensible'.\n",
"    assert np.any(errors != cost_for_missing_file), m4\n",
"    # make the plot and (optionally) export it.\n",
"    r = save_error_plot(errors, cname, p_cat)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.4.5"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
@grigorisg9gr
Copy link
Author

The script requires (menpo, menpofit) to be executed, while it refers to the 300-VW dataset (publication: 'The First Facial Landmark Tracking in-the-Wild Challenge: Benchmark and Results').

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment