@stevecooperorg
Created May 2, 2025 11:21
Ludosport video sharing scripts, 2025-05-01
#!/usr/bin/env python3
#
# Script: extract_clips.py
# Purpose: Extract multiple clips from a video file using faster keyframe-based
# seeking, hardware-accelerated H.265 (hevc_videotoolbox), and specified audio filters.
# Ensures QuickTime compatibility by tagging the video track as hvc1.
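# Example invocations (illustrative; adjust SOURCE_FILE and the URL to your own footage):
#   python3 extract_clips.py
#       -> extracts every configured clip into the same directory as SOURCE_FILE
#   python3 extract_clips.py --links https://youtu.be/VIDEO_ID
#       -> prints Discord-ready markdown with timestamped YouTube links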
import subprocess
import os
from typing import List, Tuple
import hashlib
from datetime import datetime
import re
import sys
import argparse
# https://youtu.be/8jmqHyZc1tA
# --- Configuration ---
SOURCE_FILE = os.path.expanduser("~/Downloads/PC000016.MOV")
# Clips to extract, one per line: "HH:MM:SS - HH:MM:SS - name".
# Dashes in place of the end time mean "run until the next clip's start".
CLIPS_TO_EXTRACT = """
00:56:38 - -------- - steve-and-flint
00:58:38 - -------- - samule-and-georgina
01:01:08 - -------- - flint-and-oscar
01:05:11 - -------- - georgina-and-steve
01:08:12 - -------- - samule-and-flint
01:12:32 - -------- - georgina-and-oscar
01:15:48 - 01:19:17 - flint-and-steve
01:31:23 - -------- - steve-and-colin
01:31:48 - -------- - steve-and-oscar
01:32:45 - -------- - steve-and-georgina
01:33:04 - -------- - flint-and-georgina
01:33:15 - -------- - dan-and-georgina
01:33:49 - -------- - colin-and-georgina
01:34:02 - -------- - steve-and-colin
01:35:21 - -------- - flint-and-steve
01:35:37 - -------- - dan-and-georgina
01:36:01 - -------- - dan-and-colin
01:36:29 - -------- - colin-and-oscar
01:36:48 - -------- - colin-and-georgina
01:36:59 - -------- - flint-and-georgina
01:37:14 - -------- - georgina-and-steve
01:37:54 - -------- - dan-and-samule
01:38:30 - -------- - dan-and-flint
01:39:03 - -------- - flint-and-samule
01:39:49 - -------- - flint-and-georgina
01:40:10 - -------- - steve-and-oscar
01:41:46 - -------- - steve-and-georgina
01:42:36 - -------- - colin-and-georgina
01:42:54 - 01:43:09 - flint-and-colin
01:43:31 - -------- - steve-and-samule
01:43:47 - 01:44:55 - steve-and-oscar
"""
# Video encoder options (macOS hardware-accelerated H.265)
VIDEO_ENCODER_OPTS = [
    "-c:v", "hevc_videotoolbox",
    "-tag:v", "hvc1",
    "-b:v", "5000k",
    "-maxrate", "8000k",
    "-bufsize", "12000k",
    "-pix_fmt", "yuv420p"
]
# Audio filters
AUDIO_FILTERS = "anlmdn,equalizer=f=300:t=q:w=1:g=4,equalizer=f=1000:t=q:w=1:g=5,equalizer=f=3000:t=q:w=1:g=3,loudnorm=I=-16:TP=-1.5:LRA=11"
# Audio encoder options
AUDIO_ENCODER_OPTS = ["-c:a", "aac", "-b:a", "192k"]
def generate_filename(start_time: str, end_time: str, prefix: str = "clip") -> str:
    """Generate a filename using a hash of the time range and the input file's creation date, in the same directory as SOURCE_FILE."""
    # Create a string combining start and end times
    time_str = f"{start_time}-{end_time}"
    # Generate a short hash of the time string
    hash_object = hashlib.sha256(time_str.encode())
    short_hash = hash_object.hexdigest()[:7]  # Use first 7 characters of hash
    # Get the creation date of the source file (using birthtime on macOS)
    stat = os.stat(SOURCE_FILE)
    # st_birthtime is available on macOS for creation time
    creation_date = datetime.fromtimestamp(stat.st_birthtime).strftime("%Y-%m-%d")
    # Get the directory of the source file
    output_dir = os.path.dirname(os.path.abspath(SOURCE_FILE))
    # Create the filename and join it with the output directory
    filename = f"{creation_date}-{prefix}-{short_hash}.mp4"
    return os.path.join(output_dir, filename)
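# Example (illustrative): for a source file created on 2025-05-01, a clip from
# 00:56:38 to 00:58:38 with prefix "steve-and-flint" produces a name like
#   ~/Downloads/2025-05-01-steve-and-flint-<7-char-hash>.mp4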
def parse_clips_config(config_str: str) -> List[Tuple[str, str, str]]:
    """Parse the clips configuration string into a list of tuples.

    Each line must match either:
    - 'HH:MM:SS - HH:MM:SS - name' for explicit end times
    - 'HH:MM:SS - ----- - name' where the end time is taken from the next clip's start
    where name can contain hyphens."""
    # Regex pattern for HH:MM:SS format
    time_pattern = r'\d{2}:\d{2}:\d{2}'
    dash_pattern = r'-+'  # One or more dashes
    # Full line pattern: time - (time OR dashes) - name
    line_pattern = rf'^({time_pattern})\s*-\s*({time_pattern}|{dash_pattern})\s*-\s*(.+?)$'
    raw_clips = []
    for line_num, line in enumerate(config_str.strip().split('\n'), 1):
        if not line.strip():
            continue
        match = re.match(line_pattern, line.strip())
        if not match:
            print(f"Error: Line {line_num} does not match required format 'HH:MM:SS - HH:MM:SS - name' or 'HH:MM:SS - ----- - name':", file=sys.stderr)
            print(f"Problematic line: {line}", file=sys.stderr)
            sys.exit(1)
        start_time, end_time_or_dash, prefix = match.groups()
        raw_clips.append((start_time, end_time_or_dash, prefix.strip()))
    # Process clips to resolve dash end times
    clips = []
    for i, (start_time, end_time_or_dash, prefix) in enumerate(raw_clips):
        # If the end time is dashes, use the next clip's start time (if available)
        if re.match(dash_pattern, end_time_or_dash):
            if i < len(raw_clips) - 1:
                end_time = raw_clips[i + 1][0]  # Use next clip's start time
            else:
                print("Error: Last clip cannot have dashes for its end time", file=sys.stderr)
                sys.exit(1)
        else:
            end_time = end_time_or_dash
        clips.append((start_time, end_time, prefix))
    return clips
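# Worked example (illustrative): the two lines
#   00:56:38 - -------- - steve-and-flint
#   00:58:38 - 00:59:00 - samule-and-georgina
# parse to
#   [("00:56:38", "00:58:38", "steve-and-flint"),
#    ("00:58:38", "00:59:00", "samule-and-georgina")]
# i.e. the dashes resolve to the next clip's start time.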
def extract_clip(start_time: str, end_time: str, output_file: str) -> None:
    """Extract a single clip using ffmpeg."""
    print(f"Extracting {output_file}...", file=sys.stderr)
    cmd = [
        "ffmpeg", "-y",  # -y to overwrite without prompting
        "-ss", start_time,
        "-to", end_time,
        "-i", SOURCE_FILE,
        "-vf", "scale=1920:-2"
    ] + VIDEO_ENCODER_OPTS + [
        "-af", AUDIO_FILTERS
    ] + AUDIO_ENCODER_OPTS + [
        output_file
    ]
    subprocess.run(cmd, check=True)
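# For reference, the assembled command is roughly (illustrative values):
#   ffmpeg -y -ss 00:56:38 -to 00:58:38 -i ~/Downloads/PC000016.MOV \
#       -vf scale=1920:-2 -c:v hevc_videotoolbox -tag:v hvc1 \
#       -b:v 5000k -maxrate 8000k -bufsize 12000k -pix_fmt yuv420p \
#       -af "<AUDIO_FILTERS>" -c:a aac -b:a 192k <output_file>
# Putting -ss/-to before -i makes ffmpeg seek on the input (fast, keyframe-based)
# rather than decoding from the start of the file.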
def time_to_seconds(time_str: str) -> int:
    """Convert HH:MM:SS to seconds."""
    h, m, s = map(int, time_str.split(':'))
    return h * 3600 + m * 60 + s
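# e.g. time_to_seconds("01:15:48") == 1*3600 + 15*60 + 48 == 4548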
def generate_youtube_link(base_url: str, start_time: str) -> str:
    """Generate a YouTube link with a timestamp."""
    seconds = time_to_seconds(start_time)
    # Remove any existing timestamp parameters
    base_url = re.sub(r'[?&]t=\d+', '', base_url)
    # Add the timestamp parameter
    separator = '&' if '?' in base_url else '?'
    return f"{base_url}{separator}t={seconds}"
def generate_discord_links(clips: List[Tuple[str, str, str]], youtube_url: str) -> str:
    """Generate Discord-friendly markdown with YouTube links."""
    output = ["## Fight Clips\n"]
    for start_time, end_time, prefix in clips:
        link = generate_youtube_link(youtube_url, start_time)
        # Replace hyphens with spaces in the display text
        display_name = prefix.replace('-', ' ').title()
        output.append(f"- [{display_name}]({link}) ({start_time})")
    return "\n".join(output)
def main():
    parser = argparse.ArgumentParser(description='Extract video clips and optionally generate YouTube links')
    parser.add_argument('--links', help='YouTube URL to generate timestamped links for')
    args = parser.parse_args()
    clips = parse_clips_config(CLIPS_TO_EXTRACT)
    if args.links:
        print(generate_discord_links(clips, args.links))
        return
    print("Starting clip extraction...", file=sys.stderr)
    for start_time, end_time, prefix in clips:
        output_file = generate_filename(start_time, end_time, prefix)
        extract_clip(start_time, end_time, output_file)
    print("All extractions complete. Check the output files for QuickTime playback.", file=sys.stderr)

if __name__ == "__main__":
    main()
#!/usr/bin/env python3
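#
# Script: mp4_to_discord_gif.py (per the usage message below)
# Purpose: Convert an MP4 into an animated GIF that fits under Discord's 10 MB
# upload limit, using ffmpeg's palettegen/paletteuse two-pass approach and
# progressively lowering the frame rate and width until the file is small enough.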
import subprocess
import os
import sys
import tempfile
import shutil
MAX_SIZE_BYTES = 10 * 1024 * 1024 # 10MB
DEFAULT_WIDTH = 600
MIN_WIDTH = 200
DEFAULT_FPS = 20
MIN_FPS = 8
def run_ffmpeg_cmd(cmd):
    print("Running:", " ".join(cmd), file=sys.stderr)
    subprocess.run(cmd, check=True)
def generate_palette(input_path, palette_path, fps, scale_width):
    run_ffmpeg_cmd([
        "ffmpeg", "-y", "-i", input_path,
        "-vf", f"fps={fps},scale={scale_width}:-1:flags=lanczos,palettegen",
        palette_path
    ])
def generate_gif(input_path, palette_path, output_path, fps, scale_width):
    run_ffmpeg_cmd([
        "ffmpeg", "-y", "-i", input_path, "-i", palette_path,
        "-filter_complex", f"fps={fps},scale={scale_width}:-1:flags=lanczos[x];[x][1:v]paletteuse",
        output_path
    ])
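# Note: the two-pass palette approach above (palettegen, then paletteuse) builds a
# custom 256-colour palette for the clip before encoding frames, which generally
# gives smaller, better-looking GIFs than ffmpeg's default GIF palette.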
def gif_size_under_limit(gif_path):
    return os.path.getsize(gif_path) < MAX_SIZE_BYTES
def convert_to_gif(input_path, output_path):
    with tempfile.TemporaryDirectory() as tmpdir:
        palette_path = os.path.join(tmpdir, "palette.png")
        temp_gif_path = os.path.join(tmpdir, "output.gif")
        scale = DEFAULT_WIDTH
        fps = DEFAULT_FPS
        while scale >= MIN_WIDTH:
            current_fps = fps
            while current_fps >= MIN_FPS:
                print(f"Trying scale={scale}px, fps={current_fps}", file=sys.stderr)
                generate_palette(input_path, palette_path, current_fps, scale)
                generate_gif(input_path, palette_path, temp_gif_path, current_fps, scale)
                if gif_size_under_limit(temp_gif_path):
                    shutil.copy(temp_gif_path, output_path)
                    print(f"Success: GIF saved to {output_path} (scale={scale}, fps={current_fps})", file=sys.stderr)
                    return
                current_fps -= 2  # Reduce fps first
            scale -= 50  # Then reduce scale
    print("Failed: Could not generate GIF under 10MB", file=sys.stderr)
    sys.exit(1)  # Exit non-zero so callers can detect the failure
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: python mp4_to_discord_gif.py input.mp4 output.gif", file=sys.stderr)
        sys.exit(1)
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    convert_to_gif(input_file, output_file)
#!/usr/bin/env bash
# Usage: ./process_video_mode.sh [--silent | --sounds] <input_file>
#
# This script produces a 1080p H.265 (Apple Silicon) output from the given input file.
# --silent: Exports without any audio.
# --sounds: Exports with audio filters and normalization (suitable for speech, etc.).
#
# Optimized for martial arts videos with variable intensity:
# - Higher average and max bitrates
# - Larger buffer size
# - Apple hardware-accelerated H.265
set -o errexit
set -o nounset
set -o pipefail
if [ $# -lt 2 ]; then
    echo "Usage: $0 [--silent | --sounds] <input_file>" >&2
    exit 1
fi
MODE="$1"
INPUT_FILE="$2"
# 1. Common Variables for Video
# --------------------------------------------------------------------------------
SCALE_FILTER="scale=1920:-2"
# Use Apple Silicon's H.265 encoder with a higher average bitrate and larger max rate/buffer
# to handle bursts of complex motion (like martial arts combat sequences).
VIDEO_ENCODER_OPTS="-c:v hevc_videotoolbox -tag:v hvc1 -b:v 5000k -maxrate 8000k -bufsize 12000k -pix_fmt yuv420p"
# 2. Common Output File Naming
# --------------------------------------------------------------------------------
BASE_NAME="${INPUT_FILE%.*}"
# 3. Audio Options for --sounds
# --------------------------------------------------------------------------------
# This is the same filter chain as in extract_clips.py: noise reduction (anlmdn),
# EQ boosts around 300 Hz, 1 kHz and 3 kHz, and loudness normalization (loudnorm).
# Tweak these if your sessions have specific audio needs (e.g. commentary vs.
# ambient room noise).
AUDIO_FILTERS="anlmdn, \
equalizer=f=300:t=q:w=1:g=4, \
equalizer=f=1000:t=q:w=1:g=5, \
equalizer=f=3000:t=q:w=1:g=3, \
loudnorm=I=-16:TP=-1.5:LRA=11"
AUDIO_ENCODER_OPTS="-c:a aac -b:a 192k"
# 4. Branching Logic for Silent vs. Sound
# --------------------------------------------------------------------------------
case "$MODE" in
--silent)
OUTPUT_FILE="${BASE_NAME}_1080p_silent.mp4"
echo "==> Processing video (SILENT) to 1080p..."
ffmpeg -hwaccel videotoolbox -y -i "$INPUT_FILE" \
-vf "$SCALE_FILTER" \
$VIDEO_ENCODER_OPTS \
-an \
"$OUTPUT_FILE"
;;
--sounds)
OUTPUT_FILE="${BASE_NAME}_1080p_sounds.mp4"
echo "==> Processing video with audio enhancements to 1080p..."
ffmpeg -y -i "$INPUT_FILE" \
-vf "$SCALE_FILTER" \
$VIDEO_ENCODER_OPTS \
-af "$AUDIO_FILTERS" \
$AUDIO_ENCODER_OPTS \
"$OUTPUT_FILE"
;;
*)
echo "ERROR: Invalid mode '$MODE'. Please use --silent or --sounds." >&2
exit 1
;;
esac
echo "==> Done! Output saved to: $OUTPUT_FILE"