@Cdaprod · Last active May 28, 2025
Blender BPY — Here's how to extract a frame and map it onto a traced 3D object.
"""
Nikon + Polycam iOS - Camera Rig
"""
#!/usr/bin/env python3
"""
Camera Rig + iPhone Polycam Automation System
Monitors for new Polycam scans and camera footage, then processes automatically
"""
import os
import time
import json
import shutil
import subprocess
from pathlib import Path
from datetime import datetime
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# ============ CONFIGURATION ============
CONFIG = {
    'polycam_export_folder': '/path/to/polycam/exports',  # Where Polycam saves files
    'camera_footage_folder': '/path/to/camera/footage',   # Where camera saves files
    'processing_folder': '/path/to/processing',           # Working directory
    'output_folder': '/path/to/final/output',             # Final processed files
    'blender_executable': '/Applications/Blender.app/Contents/MacOS/Blender',
    'ffmpeg_path': '/usr/local/bin/ffmpeg',
    'sync_tolerance': 30,  # seconds - how close timestamps need to be
}
# ============ FILE MONITORING ============
class MediaFileHandler(FileSystemEventHandler):
    def __init__(self):
        self.pending_scans = {}
        self.pending_footage = {}

    def on_created(self, event):
        if event.is_directory:
            return
        file_path = Path(event.src_path)
        timestamp = datetime.now()
        # Polycam scan detected
        if file_path.suffix.lower() in ['.ply', '.obj', '.gltf']:
            print(f"🔍 New Polycam scan: {file_path.name}")
            self.pending_scans[timestamp] = file_path
            self.check_for_matches(timestamp)
        # Camera footage detected
        elif file_path.suffix.lower() in ['.mp4', '.mov', '.avi']:
            print(f"🎬 New camera footage: {file_path.name}")
            self.pending_footage[timestamp] = file_path
            self.check_for_matches(timestamp)

    def check_for_matches(self, new_timestamp):
        """Check if we have matching scan + footage within sync tolerance"""
        # Find matching pairs within time tolerance
        matches = []
        for scan_time, scan_path in self.pending_scans.items():
            for footage_time, footage_path in self.pending_footage.items():
                time_diff = abs((scan_time - footage_time).total_seconds())
                if time_diff <= CONFIG['sync_tolerance']:
                    matches.append({
                        'scan_path': scan_path,
                        'footage_path': footage_path,
                        'scan_time': scan_time,
                        'footage_time': footage_time,
                        'time_diff': time_diff
                    })
        # Process matches, then drop them from the pending queues
        for match in matches:
            print(f"✅ Found synchronized pair (Δ{match['time_diff']:.1f}s)")
            self.process_synchronized_capture(match)
            self.pending_scans.pop(match['scan_time'], None)
            self.pending_footage.pop(match['footage_time'], None)

    def process_synchronized_capture(self, match):
        """Process synchronized scan + footage"""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        project_name = f"capture_{timestamp}"
        processor = CaptureProcessor(project_name, match)
        processor.run_full_pipeline()
# ============ PROCESSING PIPELINE ============
class CaptureProcessor:
    def __init__(self, project_name, match_data):
        self.project_name = project_name
        self.match = match_data
        self.project_dir = Path(CONFIG['processing_folder']) / project_name
        self.project_dir.mkdir(parents=True, exist_ok=True)

    def run_full_pipeline(self):
        """Execute complete processing pipeline"""
        print(f"🚀 Starting processing pipeline: {self.project_name}")
        try:
            # Step 1: Copy and organize files
            self.organize_source_files()
            # Step 2: Extract key frames from footage
            self.extract_key_frames()
            # Step 3: Optimize Polycam scan
            self.optimize_scan()
            # Step 4: Generate texture variants
            self.generate_texture_variants()
            # Step 5: Create Blender scene (optional, if Blender available)
            if self.blender_available():
                self.create_blender_scene()
            # Step 6: Generate preview renders
            self.generate_previews()
            # Step 7: Package final output
            self.package_output()
            print(f"✅ Pipeline complete: {self.project_name}")
        except Exception as e:
            print(f"❌ Pipeline failed: {e}")
            self.cleanup_failed_processing()

    def organize_source_files(self):
        """Copy and organize source files"""
        # Copy scan
        scan_dest = self.project_dir / "scan" / self.match['scan_path'].name
        scan_dest.parent.mkdir(exist_ok=True)
        self.copy_file(self.match['scan_path'], scan_dest)
        self.scan_path = scan_dest
        # Copy footage
        footage_dest = self.project_dir / "footage" / self.match['footage_path'].name
        footage_dest.parent.mkdir(exist_ok=True)
        self.copy_file(self.match['footage_path'], footage_dest)
        self.footage_path = footage_dest
        print(f"📁 Organized files in: {self.project_dir}")

    def extract_key_frames(self):
        """Extract key frames from camera footage"""
        frames_dir = self.project_dir / "frames"
        frames_dir.mkdir(exist_ok=True)
        # Extract frames at 1 fps for texture options
        cmd = [
            CONFIG['ffmpeg_path'],
            '-i', str(self.footage_path),
            '-vf', 'fps=1',
            '-q:v', '2',  # High quality
            str(frames_dir / 'frame_%04d.jpg')
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode == 0:
            frame_count = len(list(frames_dir.glob('*.jpg')))
            print(f"🖼️ Extracted {frame_count} frames")
            self.frames_dir = frames_dir
        else:
            raise Exception(f"FFmpeg failed: {result.stderr}")
    def optimize_scan(self):
        """Optimize Polycam scan for processing"""
        # For now, just copy - could add mesh optimization tools here
        optimized_dir = self.project_dir / "optimized"
        optimized_dir.mkdir(exist_ok=True)
        optimized_scan = optimized_dir / self.scan_path.name
        self.copy_file(self.scan_path, optimized_scan)
        self.optimized_scan = optimized_scan
        print("🔧 Scan optimization complete")

    def generate_texture_variants(self):
        """Generate different texture mapping variants"""
        textures_dir = self.project_dir / "textures"
        textures_dir.mkdir(exist_ok=True)
        # Create texture variants using different frames
        frame_files = sorted(self.frames_dir.glob('*.jpg'))
        if not frame_files:
            raise Exception("No extracted frames available for texture variants")
        # Select key frames (first, middle, last)
        key_frames = [
            frame_files[0],                      # First frame
            frame_files[len(frame_files) // 2],  # Middle frame
            frame_files[-1]                      # Last frame
        ]
        for i, frame in enumerate(key_frames):
            variant_name = f"texture_variant_{i + 1:02d}.jpg"
            variant_path = textures_dir / variant_name
            self.copy_file(frame, variant_path)
        self.texture_variants = list(textures_dir.glob('*.jpg'))
        print(f"🎨 Generated {len(self.texture_variants)} texture variants")

    def blender_available(self):
        """Check if Blender is available for processing"""
        return os.path.exists(CONFIG['blender_executable'])
    def create_blender_scene(self):
        """Create Blender scene with scan + textures"""
        blender_dir = self.project_dir / "blender"
        blender_dir.mkdir(exist_ok=True)
        # Create Blender Python script
        # (note: in Blender 4.x the import operators were renamed to
        # bpy.ops.wm.ply_import / bpy.ops.wm.obj_import)
        script_content = f'''
import bpy
import os

# Clear scene
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=False)

# Import scan
scan_path = r"{self.optimized_scan}"
if scan_path.endswith('.ply'):
    bpy.ops.import_mesh.ply(filepath=scan_path)
elif scan_path.endswith('.obj'):
    bpy.ops.import_scene.obj(filepath=scan_path)

# Get imported object
scan_obj = bpy.context.selected_objects[0] if bpy.context.selected_objects else None
if scan_obj:
    # Apply first texture variant
    texture_path = r"{self.texture_variants[0]}"
    # Create material
    material = bpy.data.materials.new("Captured_Material")
    material.use_nodes = True
    material.node_tree.nodes.clear()
    # Add nodes
    nodes = material.node_tree.nodes
    output = nodes.new('ShaderNodeOutputMaterial')
    principled = nodes.new('ShaderNodeBsdfPrincipled')
    tex_image = nodes.new('ShaderNodeTexImage')
    # Load texture
    texture_image = bpy.data.images.load(texture_path)
    tex_image.image = texture_image
    # Connect nodes
    material.node_tree.links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])
    material.node_tree.links.new(principled.outputs['BSDF'], output.inputs['Surface'])
    # Assign material
    scan_obj.data.materials.append(material)

# Add camera and lighting
bpy.ops.object.camera_add(location=(3, -3, 2))
bpy.ops.object.light_add(type='SUN', location=(2, 2, 5))

# Save blend file
bpy.ops.wm.save_as_mainfile(filepath=r"{blender_dir / 'scene.blend'}")
print("Blender scene created successfully")
'''
        script_path = blender_dir / 'create_scene.py'
        with open(script_path, 'w') as f:
            f.write(script_content)
        # Run Blender headless with the generated script
        cmd = [
            CONFIG['blender_executable'],
            '--background',
            '--python', str(script_path)
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode == 0:
            print(f"🎨 Blender scene created: {blender_dir / 'scene.blend'}")
        else:
            print(f"⚠️ Blender processing failed: {result.stderr}")
    def generate_previews(self):
        """Generate preview images and videos"""
        previews_dir = self.project_dir / "previews"
        previews_dir.mkdir(exist_ok=True)
        # Create preview video from extracted frames
        preview_video = previews_dir / "texture_preview.mp4"
        cmd = [
            CONFIG['ffmpeg_path'],
            '-framerate', '2',
            '-i', str(self.frames_dir / 'frame_%04d.jpg'),
            '-c:v', 'libx264',
            '-pix_fmt', 'yuv420p',
            str(preview_video)
        ]
        subprocess.run(cmd, capture_output=True)
        print(f"🎬 Preview video created: {preview_video}")

    def package_output(self):
        """Package final output files"""
        output_dir = Path(CONFIG['output_folder']) / self.project_name
        output_dir.mkdir(parents=True, exist_ok=True)
        # Copy key files to output
        files_to_copy = [
            (self.optimized_scan, output_dir / "scan.ply"),
            (self.texture_variants[0], output_dir / "texture_primary.jpg"),
            (self.project_dir / "previews" / "texture_preview.mp4", output_dir / "preview.mp4")
        ]
        for src, dst in files_to_copy:
            if src.exists():
                self.copy_file(src, dst)
        # Create metadata file
        metadata = {
            'project_name': self.project_name,
            'scan_file': str(self.match['scan_path']),
            'footage_file': str(self.match['footage_path']),
            'capture_time_diff': self.match['time_diff'],
            'frame_count': len(list(self.frames_dir.glob('*.jpg'))),
            'texture_variants': len(self.texture_variants),
            'processing_complete': datetime.now().isoformat()
        }
        with open(output_dir / 'metadata.json', 'w') as f:
            json.dump(metadata, f, indent=2)
        print(f"📦 Output packaged: {output_dir}")

    def copy_file(self, src, dst):
        """Copy file with error handling"""
        try:
            dst.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(src, dst)  # portable copy instead of shelling out to `cp`
        except OSError as e:
            print(f"⚠️ Failed to copy {src} to {dst}: {e}")

    def cleanup_failed_processing(self):
        """Clean up if processing fails"""
        print(f"🧹 Cleaning up failed processing: {self.project_name}")
        # Could implement cleanup logic here
# ============ MAIN AUTOMATION SYSTEM ============
class CameraRigAutomation:
    def __init__(self):
        self.observer = Observer()
        self.handler = MediaFileHandler()

    def start_monitoring(self):
        """Start monitoring for new files"""
        print("🚀 Starting Camera Rig + Polycam Automation System")
        print(f"📁 Monitoring Polycam exports: {CONFIG['polycam_export_folder']}")
        print(f"🎬 Monitoring camera footage: {CONFIG['camera_footage_folder']}")
        # Set up file system monitoring
        self.observer.schedule(
            self.handler,
            CONFIG['polycam_export_folder'],
            recursive=True
        )
        self.observer.schedule(
            self.handler,
            CONFIG['camera_footage_folder'],
            recursive=True
        )
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print("\n🛑 Stopping automation system...")
            self.observer.stop()
            self.observer.join()
            print("✅ Automation system stopped")

    def process_existing_files(self):
        """Process any existing files in the directories"""
        print("🔍 Checking for existing files to process...")
        # This could scan existing files and process them
        polycam_files = Path(CONFIG['polycam_export_folder']).glob('**/*.ply')
        camera_files = Path(CONFIG['camera_footage_folder']).glob('**/*.mp4')
        # Match files by timestamp and process
        # Implementation depends on your file naming convention

# ============ USAGE ============
if __name__ == "__main__":
    # Update CONFIG paths for your setup
    print("📱 Camera Rig + iPhone Polycam Automation")
    print("⚙️ Update CONFIG paths before running!")
    automation = CameraRigAutomation()
    automation.start_monitoring()

If I take a frame from an image or video… and trace out a 3D object in it… how do I map the texture from the frame onto the new object?

Great question! Here's how to extract a frame and map it onto a traced 3D object:

Frame-to-3D Object Texture Mapping - Complete Workflow

🎯 The Process:

1. Extract Frame:

frame_path, video_image = extract_frame_from_video("terminal.mp4", frame_number=120)

2. Trace 3D Object:

# Define vertices/faces for your traced shape
vertices = [(-2, -1.5, 0), (2, -1.5, 0), (2, 1.5, 0), (-2, 1.5, 0)]
faces = [[0, 1, 2, 3]]
traced_obj = create_mesh_from_vertices(vertices, faces, "Terminal_Window")

3. Apply Texture with UV Mapping:

material = apply_frame_texture_to_object(
    traced_obj, video_image,
    uv_method='bounds',  # or 'auto' or 'camera'
    bounds=(0.1, 0.2, 0.9, 0.8)  # area of frame to map
)

🎨 Three UV Mapping Methods:

1. Auto Unwrapping ('auto'):

  • Smart UV projection
  • Good for complex shapes
  • Automatic seam detection

2. Camera Projection ('camera'):

  • Projects texture from camera viewpoint
  • Perfect for flat surfaces viewed from specific angle
  • Requires camera object as parameter

3. Manual Bounds ('bounds'):

  • Map specific area of frame to object
  • Define UV bounds: (left, bottom, right, top) in 0-1 coordinates
  • Best for precise control
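
For orientation, here is how each method is selected through apply_frame_texture_to_object() (defined in the full script below); a minimal sketch, assuming obj, frame_image, and a cam camera object already exist in your scene:

# 'auto': smart UV projection, no extra arguments
apply_frame_texture_to_object(obj, frame_image, uv_method='auto')

# 'camera': projects from a camera object's viewpoint
apply_frame_texture_to_object(obj, frame_image, uv_method='camera', camera=cam)

# 'bounds': maps a sub-region of the frame, (left, bottom, right, top) in 0-1 space
apply_frame_texture_to_object(obj, frame_image, uv_method='bounds', bounds=(0.1, 0.2, 0.9, 0.8))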

📱 iPhone Terminal Example:

def trace_terminal_from_screenshot():
    # Load iPhone screenshot
    screenshot_path = "/path/to/terminal_screenshot.png"
    
    # Create terminal window shape (customize to match your trace)
    terminal_vertices = [
        # Rectangle matching terminal area
        (-1.5, -2.0, 0.0), (1.5, -2.0, 0.0), 
        (1.5, 2.0, 0.0), (-1.5, 2.0, 0.0)
    ]
    terminal_faces = [[0, 1, 2, 3]]
    
    # Create object
    terminal_obj = create_mesh_from_vertices(
        terminal_vertices, terminal_faces, "iPhone_Terminal"
    )
    
    # Map terminal area from screenshot
    # Bounds represent where terminal appears in your screenshot
    terminal_area = (0.05, 0.15, 0.95, 0.85)  # Adjust these values
    
    material = apply_frame_texture_to_object(
        terminal_obj, screenshot_path,
        uv_method='bounds',
        bounds=terminal_area
    )
    
    return terminal_obj, material

🎬 Video Frame Workflow:

# Complete video-to-3D workflow
result = trace_object_from_video_frame(
    video_path="terminal_recording.mp4",
    frame_number=150,  # Frame to extract
    vertices=your_traced_vertices,
    faces=your_traced_faces,
    object_name="Terminal_3D"
)

traced_object = result['object']
material = result['material']

🔧 Interactive Tracing Setup:

# Set up environment for manual tracing
setup = interactive_trace_setup("terminal_video.mp4", "My_Terminal")

# This creates:
# - Reference plane with current frame
# - Empty object to trace over
# - Camera positioned for tracing
# - All ready for manual modeling
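
Once the trace is modeled, the texture can be applied with finalize_traced_object_texture() from the script below; a minimal sketch, assuming setup is the dict returned by interactive_trace_setup() above and the bounds are placeholders:

# Bake the frame onto the traced mesh once modeling is done
material = finalize_traced_object_texture(
    setup['traced_object'],
    setup['frame_image'] or setup['reference_image'],
    uv_bounds=(0.1, 0.2, 0.9, 0.8)  # or pass camera=setup['camera'] for projection
)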

💡 Pro Tips:

  1. Determine UV Bounds: Use image editing software to find the exact pixel coordinates of your traced area, then convert them to 0-1 UV space (see the sketch after this list)

  2. Frame Selection: Choose frames with good lighting and clear visibility of the object you're tracing

  3. Vertex Placement: When tracing, place vertices to match the perspective and shape exactly as seen in the frame

  4. Multiple Angles: For complex objects, trace from multiple frames/angles and use different UV mapping methods
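
For tip 1, the conversion is a simple divide, with one catch: image pixel coordinates usually put the origin at the top-left, while UV space puts it at the bottom-left, so the Y values must be flipped. A minimal sketch (the pixel values are placeholders):

def pixels_to_uv_bounds(left_px, top_px, right_px, bottom_px, img_w, img_h):
    """Convert a pixel rectangle (top-left origin) to (left, bottom, right, top) UV bounds."""
    return (
        left_px / img_w,           # left
        1.0 - bottom_px / img_h,   # bottom (flip Y)
        right_px / img_w,          # right
        1.0 - top_px / img_h,      # top (flip Y)
    )

# e.g. a 1920x1080 frame where the traced area spans pixels (96, 216) to (1824, 864)
bounds = pixels_to_uv_bounds(96, 216, 1824, 864, 1920, 1080)  # -> (0.05, 0.2, 0.95, 0.8)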

This system gives you complete control over extracting frames and precisely mapping them onto your traced 3D geometry!

import bpy
import bmesh
import mathutils
from mathutils import Vector
import math
import os

# ============ FRAME EXTRACTION ============
def extract_frame_from_video(video_path, frame_number, output_path=None):
    """Extract specific frame from video and save as image"""
    # Load video
    if not os.path.exists(video_path):
        raise FileNotFoundError(f"Video not found: {video_path}")
    # Create output path if not provided
    if output_path is None:
        base_name = os.path.splitext(os.path.basename(video_path))[0]
        output_path = f"/tmp/{base_name}_frame_{frame_number:04d}.png"
    # Load video as a movie image datablock
    video_image = bpy.data.images.load(video_path)
    video_image.source = 'MOVIE'
    # Set to specific frame
    video_image.frame_current = frame_number
    # Save frame as image
    video_image.save_render(output_path)
    print(f"📸 Extracted frame {frame_number} to: {output_path}")
    return output_path, video_image

def get_current_viewport_frame():
    """Return the scene's current frame number (the frame shown for video textures)"""
    scene = bpy.context.scene
    current_frame = scene.frame_current
    return current_frame
# ============ 3D OBJECT TRACING METHODS ============
def create_traced_object_from_reference(reference_image_path, object_name="Traced_Object"):
    """Create base mesh for tracing over reference image"""
    # Load reference image
    ref_image = bpy.data.images.load(reference_image_path)
    # Create reference plane for tracing
    bpy.ops.mesh.primitive_plane_add(size=4, location=(0, 0, 0))
    ref_plane = bpy.context.object
    ref_plane.name = f"{object_name}_Reference"
    # Apply reference image to plane
    ref_material = bpy.data.materials.new(f"{object_name}_Reference_Mat")
    ref_material.use_nodes = True
    ref_material.node_tree.nodes.clear()
    nodes = ref_material.node_tree.nodes
    links = ref_material.node_tree.links
    output = nodes.new('ShaderNodeOutputMaterial')
    emission = nodes.new('ShaderNodeEmission')
    tex_image = nodes.new('ShaderNodeTexImage')
    tex_image.image = ref_image
    links.new(tex_image.outputs['Color'], emission.inputs['Color'])
    links.new(emission.outputs['Emission'], output.inputs['Surface'])
    ref_plane.data.materials.append(ref_material)
    # Create new mesh for tracing
    bpy.ops.mesh.primitive_cube_add(location=(0, 0, 1))
    traced_object = bpy.context.object
    traced_object.name = object_name
    return traced_object, ref_plane, ref_image

def create_mesh_from_vertices(vertices, faces, object_name="Custom_Mesh"):
    """Create mesh from vertex/face data (for precise tracing)"""
    # Create new mesh
    mesh = bpy.data.meshes.new(object_name)
    # Create mesh from vertices and faces
    mesh.from_pydata(vertices, [], faces)
    mesh.update()
    # Create object and link it into the active collection
    obj = bpy.data.objects.new(object_name, mesh)
    bpy.context.collection.objects.link(obj)
    return obj
# ============ UV MAPPING METHODS ============
def auto_uv_unwrap_object(obj):
    """Automatically unwrap object for texture mapping"""
    # Select object and enter edit mode
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    # Select all faces
    bpy.ops.mesh.select_all(action='SELECT')
    # Smart UV unwrap (angle_limit is in radians since Blender 2.91)
    bpy.ops.uv.smart_project(
        angle_limit=math.radians(66),
        island_margin=0.02,
        area_weight=0.0,
        correct_aspect=True
    )
    # Exit edit mode
    bpy.ops.object.mode_set(mode='OBJECT')
    print(f"✅ UV unwrapped: {obj.name}")
def project_uv_from_camera(obj, camera, image_width, image_height):
    """Project UV coordinates from camera view (camera mapping)"""
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    # Get mesh data (from_edit_mesh, since we are in edit mode)
    mesh = bmesh.from_edit_mesh(obj.data)
    mesh.faces.ensure_lookup_table()
    # Ensure UV layer exists
    if not mesh.loops.layers.uv:
        mesh.loops.layers.uv.new("UVMap")
    uv_layer = mesh.loops.layers.uv.active
    # Calculate camera projection for each vertex
    for face in mesh.faces:
        for loop in face.loops:
            vertex = loop.vert
            # Convert vertex to world coordinates
            world_pos = obj.matrix_world @ vertex.co
            # Project to camera view (approximate: intersect the view ray with the Z=0 plane)
            camera_coords = mathutils.geometry.intersect_line_plane(
                world_pos,
                world_pos + (camera.location - world_pos).normalized() * 1000,
                Vector((0, 0, 0)), Vector((0, 0, 1))
            )
            if camera_coords:
                # Convert to UV coordinates (0-1 range)
                u = (camera_coords.x + 2) / 4  # Adjust based on your scene scale
                v = (camera_coords.y + 2) / 4
                # Clamp to 0-1 range
                u = max(0, min(1, u))
                v = max(0, min(1, v))
                loop[uv_layer].uv = (u, v)
    # Update mesh
    bmesh.update_edit_mesh(obj.data)
    bpy.ops.object.mode_set(mode='OBJECT')
    print(f"📐 Camera-projected UV mapping applied to: {obj.name}")
def manual_uv_mapping_from_bounds(obj, bounds_2d):
    """Map UV coordinates based on 2D bounds in the reference image"""
    # bounds_2d format: (min_x, min_y, max_x, max_y) in 0-1 UV space
    min_u, min_v, max_u, max_v = bounds_2d
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    # Get mesh data (from_edit_mesh, since we are in edit mode)
    mesh = bmesh.from_edit_mesh(obj.data)
    # Ensure UV layer
    if not mesh.loops.layers.uv:
        mesh.loops.layers.uv.new("UVMap")
    uv_layer = mesh.loops.layers.uv.active
    # Get object bounds in local space
    vertices = [v.co for v in mesh.verts]
    if not vertices:
        return
    local_min = Vector((
        min(v.x for v in vertices),
        min(v.y for v in vertices),
        min(v.z for v in vertices)
    ))
    local_max = Vector((
        max(v.x for v in vertices),
        max(v.y for v in vertices),
        max(v.z for v in vertices)
    ))
    # Map vertices to UV bounds
    for face in mesh.faces:
        for loop in face.loops:
            vertex = loop.vert.co
            # Normalize vertex position within object bounds
            normalized_x = (vertex.x - local_min.x) / (local_max.x - local_min.x) if local_max.x != local_min.x else 0
            normalized_y = (vertex.y - local_min.y) / (local_max.y - local_min.y) if local_max.y != local_min.y else 0
            # Map to UV bounds
            u = min_u + normalized_x * (max_u - min_u)
            v = min_v + normalized_y * (max_v - min_v)
            loop[uv_layer].uv = (u, v)
    # Update mesh
    bmesh.update_edit_mesh(obj.data)
    bpy.ops.object.mode_set(mode='OBJECT')
    print(f"🎯 Manual UV bounds mapping applied: {bounds_2d}")
# ============ TEXTURE APPLICATION ============
def apply_frame_texture_to_object(obj, frame_image_path, uv_method='auto', **kwargs):
    """Apply frame texture to object with specified UV mapping method"""
    # Load frame image
    if isinstance(frame_image_path, str):
        frame_image = bpy.data.images.load(frame_image_path)
    else:
        frame_image = frame_image_path  # Already a Blender image
    # Apply UV mapping based on method
    if uv_method == 'auto':
        auto_uv_unwrap_object(obj)
    elif uv_method == 'camera' and 'camera' in kwargs:
        project_uv_from_camera(obj, kwargs['camera'], frame_image.size[0], frame_image.size[1])
    elif uv_method == 'bounds' and 'bounds' in kwargs:
        manual_uv_mapping_from_bounds(obj, kwargs['bounds'])
    # Create material with frame texture
    material = bpy.data.materials.new(f"{obj.name}_Frame_Material")
    material.use_nodes = True
    material.node_tree.nodes.clear()
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    # Create nodes
    output = nodes.new('ShaderNodeOutputMaterial')
    principled = nodes.new('ShaderNodeBsdfPrincipled')
    tex_image = nodes.new('ShaderNodeTexImage')
    tex_coord = nodes.new('ShaderNodeTexCoord')
    mapping = nodes.new('ShaderNodeMapping')
    # Set image
    tex_image.image = frame_image
    # Connect nodes
    links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
    links.new(mapping.outputs['Vector'], tex_image.inputs['Vector'])
    links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])
    links.new(principled.outputs['BSDF'], output.inputs['Surface'])
    # Assign material
    if obj.data.materials:
        obj.data.materials[0] = material
    else:
        obj.data.materials.append(material)
    print(f"🎨 Applied frame texture to {obj.name} using {uv_method} UV mapping")
    return material
# ============ COMPLETE WORKFLOW FUNCTIONS ============
def trace_object_from_video_frame(video_path, frame_number, vertices, faces, object_name="Video_Traced_Object"):
    """Complete workflow: Extract frame → Create object → Apply texture"""
    # Step 1: Extract frame
    frame_path, video_image = extract_frame_from_video(video_path, frame_number)
    # Step 2: Create traced object
    traced_obj = create_mesh_from_vertices(vertices, faces, object_name)
    # Step 3: Apply texture with auto UV mapping
    material = apply_frame_texture_to_object(traced_obj, video_image)
    return {
        'object': traced_obj,
        'material': material,
        'frame_image': video_image,
        'frame_path': frame_path
    }

def interactive_trace_setup(reference_media_path, object_name="Interactive_Trace"):
    """Set up interactive tracing environment"""
    # Determine if image or video
    ext = os.path.splitext(reference_media_path)[1].lower()
    is_video = ext in {'.mp4', '.mov', '.avi', '.mkv', '.webm', '.m4v'}
    if is_video:
        # Extract current frame for reference
        current_frame = bpy.context.scene.frame_current
        frame_path, frame_image = extract_frame_from_video(reference_media_path, current_frame)
        reference_path = frame_path
    else:
        # Use image directly
        reference_path = reference_media_path
        frame_image = bpy.data.images.load(reference_path)
    # Create tracing setup
    traced_obj, ref_plane, ref_image = create_traced_object_from_reference(reference_path, object_name)
    # Position camera for tracing
    bpy.ops.object.camera_add(location=(0, -8, 4))
    camera = bpy.context.object
    camera.name = f"{object_name}_Trace_Camera"
    # Point camera at reference
    camera.rotation_euler = (1.1, 0, 0)
    bpy.context.scene.camera = camera
    return {
        'traced_object': traced_obj,
        'reference_plane': ref_plane,
        'camera': camera,
        'reference_image': ref_image,
        'frame_image': frame_image if is_video else None
    }

def finalize_traced_object_texture(traced_obj, frame_image, uv_bounds=None, camera=None):
    """Finalize texture mapping for traced object"""
    if uv_bounds:
        # Use manual bounds mapping
        material = apply_frame_texture_to_object(
            traced_obj, frame_image,
            uv_method='bounds',
            bounds=uv_bounds
        )
    elif camera:
        # Use camera projection mapping
        material = apply_frame_texture_to_object(
            traced_obj, frame_image,
            uv_method='camera',
            camera=camera
        )
    else:
        # Use auto mapping
        material = apply_frame_texture_to_object(traced_obj, frame_image)
    return material
# ============ EXAMPLE USAGE ============
def example_terminal_object_tracing():
    """Example: Trace a terminal window from iPhone screenshot"""
    # Example iPhone screenshot with terminal
    screenshot_path = "/path/to/terminal_screenshot.png"
    # Define vertices for a simple terminal window shape (customize as needed)
    terminal_vertices = [
        # Terminal window outline vertices (adjust coordinates based on your trace)
        (-2.0, -1.5, 0.0), ( 2.0, -1.5, 0.0), ( 2.0, 1.5, 0.0), (-2.0, 1.5, 0.0),  # Base
        (-2.0, -1.5, 0.1), ( 2.0, -1.5, 0.1), ( 2.0, 1.5, 0.1), (-2.0, 1.5, 0.1),  # Top
    ]
    terminal_faces = [
        # Terminal window faces (quads)
        [0, 1, 2, 3],  # Bottom
        [4, 7, 6, 5],  # Top
        [0, 4, 5, 1],  # Front
        [2, 6, 7, 3],  # Back
        [0, 3, 7, 4],  # Left
        [1, 5, 6, 2],  # Right
    ]
    # Create traced object
    traced_obj = create_mesh_from_vertices(terminal_vertices, terminal_faces, "Terminal_Window")
    # Load screenshot and apply texture
    screenshot_image = bpy.data.images.load(screenshot_path)
    # UV bounds for terminal area in screenshot (0-1 coordinates)
    # You'd determine these by examining where the terminal appears in your screenshot
    terminal_bounds = (0.1, 0.2, 0.9, 0.8)  # left, bottom, right, top
    material = apply_frame_texture_to_object(
        traced_obj, screenshot_image,
        uv_method='bounds',
        bounds=terminal_bounds
    )
    print("🖥️ Terminal window traced and textured!")
    return traced_obj, material

# ============ MAIN EXECUTION ============
if __name__ == "__main__":
    print("🎯 Frame-to-3D Object Texture Mapping System Ready!")
    print("📖 Available methods:")
    print("  1. trace_object_from_video_frame() - Complete video workflow")
    print("  2. interactive_trace_setup() - Set up tracing environment")
    print("  3. apply_frame_texture_to_object() - Apply texture with UV options")
    print("  4. example_terminal_object_tracing() - Terminal window example")
    print("")
    print("🔧 UV Mapping Options:")
    print("  - 'auto': Smart projection unwrapping")
    print("  - 'camera': Camera-based projection mapping")
    print("  - 'bounds': Manual UV bounds mapping")

import bpy
import bmesh
import math
import os
from mathutils import Vector, Matrix
import mathutils

# ============ POLYCAM IMPORT UTILITIES ============
def import_polycam_scan(file_path, scan_name="Polycam_Scan"):
    """Import Polycam scan (supports .ply, .obj, .fbx, .gltf formats)"""
    ext = os.path.splitext(file_path)[1].lower()
    # Clear selection
    bpy.ops.object.select_all(action='DESELECT')
    # Import based on file format
    # (note: in Blender 4.x the PLY/OBJ operators are bpy.ops.wm.ply_import / bpy.ops.wm.obj_import)
    if ext == '.ply':
        bpy.ops.import_mesh.ply(filepath=file_path)
    elif ext == '.obj':
        bpy.ops.import_scene.obj(filepath=file_path)
    elif ext == '.fbx':
        bpy.ops.import_scene.fbx(filepath=file_path)
    elif ext in ['.gltf', '.glb']:
        bpy.ops.import_scene.gltf(filepath=file_path)
    else:
        raise ValueError(f"Unsupported Polycam format: {ext}")
    # Get imported object(s)
    imported_objects = list(bpy.context.selected_objects)
    if imported_objects:
        main_object = imported_objects[0]
        main_object.name = scan_name
        print(f"✅ Imported Polycam scan: {scan_name}")
        print(f"📊 Vertices: {len(main_object.data.vertices)}")
        print(f"📐 Faces: {len(main_object.data.polygons)}")
        return main_object, imported_objects
    else:
        raise Exception("Failed to import Polycam scan")
def optimize_polycam_mesh(polycam_obj, decimate_ratio=0.5, smooth_iterations=2):
    """Optimize Polycam mesh for better performance and texturing"""
    bpy.context.view_layer.objects.active = polycam_obj
    polycam_obj.select_set(True)
    # Enter edit mode
    bpy.ops.object.mode_set(mode='EDIT')
    # Clean up mesh
    bpy.ops.mesh.select_all(action='SELECT')
    # Remove doubles/merge vertices
    bpy.ops.mesh.remove_doubles(threshold=0.001)
    # Fill holes
    bpy.ops.mesh.fill_holes(sides=0)
    # Recalculate normals
    bpy.ops.mesh.normals_make_consistent(inside=False)
    # Exit edit mode
    bpy.ops.object.mode_set(mode='OBJECT')
    # Add decimate modifier for optimization
    if decimate_ratio < 1.0:
        decimate_mod = polycam_obj.modifiers.new(name="Decimate", type='DECIMATE')
        decimate_mod.ratio = decimate_ratio
        decimate_mod.use_collapse_triangulate = True
    # Add smooth shading
    if smooth_iterations > 0:
        smooth_mod = polycam_obj.modifiers.new(name="Smooth", type='SMOOTH')
        smooth_mod.iterations = smooth_iterations
    print(f"🔧 Optimized Polycam mesh: {polycam_obj.name}")
    return polycam_obj
# ============ POLYCAM + FRAME TEXTURE WORKFLOWS ============
def replace_polycam_texture_with_frame(polycam_obj, frame_image_path, uv_method='preserve'):
    """Replace Polycam's original texture with frame from video/screenshot"""
    # Load frame image
    if isinstance(frame_image_path, str):
        frame_image = bpy.data.images.load(frame_image_path)
    else:
        frame_image = frame_image_path
    # Handle UV mapping
    if uv_method == 'preserve':
        # Keep existing UV coordinates from Polycam
        print("📐 Preserving original Polycam UV mapping")
    elif uv_method == 'auto':
        # Create new UV mapping
        auto_uv_unwrap_object(polycam_obj)
    elif uv_method == 'project':
        # Project from current camera view
        project_uv_from_active_camera(polycam_obj)
    # Create new material with frame texture
    material = bpy.data.materials.new(f"{polycam_obj.name}_Frame_Material")
    material.use_nodes = True
    material.node_tree.nodes.clear()
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    # Create enhanced material nodes
    output = nodes.new('ShaderNodeOutputMaterial')
    principled = nodes.new('ShaderNodeBsdfPrincipled')
    tex_image = nodes.new('ShaderNodeTexImage')
    tex_coord = nodes.new('ShaderNodeTexCoord')
    mapping = nodes.new('ShaderNodeMapping')
    # Bump node adds subtle surface detail derived from the color texture
    bump = nodes.new('ShaderNodeBump')
    # Set frame image
    tex_image.image = frame_image
    tex_image.interpolation = 'Linear'
    # Enhanced material settings for scanned objects
    principled.inputs['Roughness'].default_value = 0.3
    # Note: this input was renamed 'Specular IOR Level' in Blender 4.0
    principled.inputs['Specular'].default_value = 0.1
    bump.inputs['Strength'].default_value = 0.2  # Subtle surface detail
    # Connect nodes
    links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
    links.new(mapping.outputs['Vector'], tex_image.inputs['Vector'])
    links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])
    # Add subtle bump mapping from color
    links.new(tex_image.outputs['Color'], bump.inputs['Height'])
    links.new(bump.outputs['Normal'], principled.inputs['Normal'])
    links.new(principled.outputs['BSDF'], output.inputs['Surface'])
    # Replace existing materials
    polycam_obj.data.materials.clear()
    polycam_obj.data.materials.append(material)
    print(f"🎨 Applied frame texture to Polycam scan: {polycam_obj.name}")
    return material
def blend_polycam_with_frame_texture(polycam_obj, frame_image_path, blend_factor=0.5):
    """Blend original Polycam texture with frame texture"""
    frame_image = bpy.data.images.load(frame_image_path) if isinstance(frame_image_path, str) else frame_image_path
    # Get or create material
    if polycam_obj.data.materials:
        material = polycam_obj.data.materials[0]
        if not material.use_nodes:
            material.use_nodes = True
    else:
        material = bpy.data.materials.new(f"{polycam_obj.name}_Blended_Material")
        material.use_nodes = True
        polycam_obj.data.materials.append(material)
    nodes = material.node_tree.nodes
    links = material.node_tree.links
    # Find existing image texture (from Polycam)
    existing_tex = None
    for node in nodes:
        if node.type == 'TEX_IMAGE' and node.image:
            existing_tex = node
            break
    # Add frame texture
    frame_tex = nodes.new('ShaderNodeTexImage')
    frame_tex.image = frame_image
    frame_tex.location = (existing_tex.location.x, existing_tex.location.y - 300) if existing_tex else (-300, 0)
    # Add mix node for blending
    mix_node = nodes.new('ShaderNodeMixRGB')
    mix_node.blend_type = 'MIX'
    mix_node.inputs['Fac'].default_value = blend_factor
    mix_node.location = (0, 0)
    # Connect textures to mix node
    if existing_tex:
        links.new(existing_tex.outputs['Color'], mix_node.inputs['Color1'])
    links.new(frame_tex.outputs['Color'], mix_node.inputs['Color2'])
    # Find principled BSDF and connect
    principled = None
    for node in nodes:
        if node.type == 'BSDF_PRINCIPLED':
            principled = node
            break
    if principled:
        links.new(mix_node.outputs['Color'], principled.inputs['Base Color'])
    print(f"🎭 Blended Polycam texture with frame (factor: {blend_factor})")
    return material
def project_frame_onto_polycam_from_camera(polycam_obj, frame_image_path, camera_obj):
    """Project frame texture onto Polycam scan from specific camera angle"""
    frame_image = bpy.data.images.load(frame_image_path) if isinstance(frame_image_path, str) else frame_image_path
    # Calculate camera projection UV mapping
    bpy.context.view_layer.objects.active = polycam_obj
    polycam_obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    # Get mesh data
    mesh = bmesh.from_edit_mesh(polycam_obj.data)
    # Ensure UV layer
    if not mesh.loops.layers.uv:
        mesh.loops.layers.uv.new("Camera_Projection")
    uv_layer = mesh.loops.layers.uv.active
    # Get camera matrices
    camera_matrix = camera_obj.matrix_world
    camera_data = camera_obj.data
    # Project each vertex
    for face in mesh.faces:
        for loop in face.loops:
            vertex_world = polycam_obj.matrix_world @ loop.vert.co
            # Transform to camera space
            vertex_camera = camera_matrix.inverted() @ vertex_world
            # Project to screen space
            if vertex_camera.z < 0:  # In front of the camera (it looks down -Z in camera space)
                # Simple perspective projection
                screen_x = -vertex_camera.x / vertex_camera.z
                screen_y = -vertex_camera.y / vertex_camera.z
                # Convert to UV coordinates (0-1)
                u = (screen_x + 1) / 2
                v = (screen_y + 1) / 2
                # Clamp to valid range
                u = max(0, min(1, u))
                v = max(0, min(1, v))
                loop[uv_layer].uv = (u, v)
            else:
                loop[uv_layer].uv = (0.5, 0.5)  # Default UV for vertices behind the camera
    bmesh.update_edit_mesh(polycam_obj.data)
    bpy.ops.object.mode_set(mode='OBJECT')
    # Apply frame texture with projection mapping
    material = replace_polycam_texture_with_frame(polycam_obj, frame_image, 'preserve')
    print(f"📹 Projected frame onto Polycam scan from camera: {camera_obj.name}")
    return material
# ============ COMPLETE POLYCAM WORKFLOWS ============
def polycam_to_frame_textured_object(polycam_file, frame_source, workflow='replace'):
    """Complete workflow: Import Polycam → Apply frame texture"""
    # Import Polycam scan
    polycam_obj, all_objects = import_polycam_scan(polycam_file, "Polycam_Scan")
    # Optimize mesh
    polycam_obj = optimize_polycam_mesh(polycam_obj, decimate_ratio=0.7)
    # Handle frame source (video frame or image)
    if isinstance(frame_source, tuple):
        # Video frame: (video_path, frame_number)
        video_path, frame_number = frame_source
        frame_path, frame_image = extract_frame_from_video(video_path, frame_number)
    else:
        # Direct image path
        frame_image = frame_source
    # Apply texture based on workflow
    if workflow == 'replace':
        material = replace_polycam_texture_with_frame(polycam_obj, frame_image, 'preserve')
    elif workflow == 'blend':
        material = blend_polycam_with_frame_texture(polycam_obj, frame_image, 0.6)
    elif workflow == 'project':
        # Create camera for projection
        bpy.ops.object.camera_add(location=(0, -5, 2))
        camera = bpy.context.object
        material = project_frame_onto_polycam_from_camera(polycam_obj, frame_image, camera)
    else:
        # Guard against typos so `material` is always defined
        raise ValueError(f"Unknown workflow: {workflow}")
    return {
        'polycam_object': polycam_obj,
        'material': material,
        'frame_image': frame_image,
        'all_objects': all_objects
    }
def create_polycam_terminal_scene(polycam_scan_path, terminal_screenshots):
    """Create scene with Polycam-scanned environment + terminal screenshots"""
    # Import main environment scan
    env_obj, env_objects = import_polycam_scan(polycam_scan_path, "Environment_Scan")
    env_obj = optimize_polycam_mesh(env_obj, decimate_ratio=0.5)
    # Position environment
    env_obj.location = (0, 0, -1)
    env_obj.scale = (2, 2, 2)
    # Create terminal screenshot planes at various positions
    terminal_objects = []
    for i, screenshot_path in enumerate(terminal_screenshots):
        # Create plane for terminal
        bpy.ops.mesh.primitive_plane_add(
            size=3,
            location=(i * 2 - len(terminal_screenshots), 0, 1)
        )
        terminal_plane = bpy.context.object
        terminal_plane.name = f"Terminal_Screen_{i:02d}"
        # Apply screenshot texture
        screenshot_image = bpy.data.images.load(screenshot_path)
        # Create glowing screen material
        screen_material = bpy.data.materials.new(f"Terminal_Screen_Mat_{i:02d}")
        screen_material.use_nodes = True
        screen_material.node_tree.nodes.clear()
        nodes = screen_material.node_tree.nodes
        links = screen_material.node_tree.links
        # Screen material setup
        output = nodes.new('ShaderNodeOutputMaterial')
        emission = nodes.new('ShaderNodeEmission')
        tex_image = nodes.new('ShaderNodeTexImage')
        tex_image.image = screenshot_image
        emission.inputs['Strength'].default_value = 3.0
        links.new(tex_image.outputs['Color'], emission.inputs['Color'])
        links.new(emission.outputs['Emission'], output.inputs['Surface'])
        terminal_plane.data.materials.append(screen_material)
        terminal_objects.append(terminal_plane)
    # Add dramatic lighting
    bpy.ops.object.light_add(type='AREA', location=(0, -4, 6))
    key_light = bpy.context.object
    key_light.data.energy = 15
    key_light.data.size = 8
    # Add camera
    bpy.ops.object.camera_add(location=(0, -8, 3))
    camera = bpy.context.object
    bpy.context.scene.camera = camera
    return {
        'environment': env_obj,
        'terminals': terminal_objects,
        'camera': camera,
        'lighting': [key_light]
    }
# ============ POLYCAM UTILITIES ============
def auto_uv_unwrap_object(obj):
    """Auto UV unwrap for Polycam objects"""
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    # angle_limit is in radians since Blender 2.91
    bpy.ops.uv.smart_project(angle_limit=math.radians(66), island_margin=0.02)
    bpy.ops.object.mode_set(mode='OBJECT')

def project_uv_from_active_camera(obj):
    """Project UV from active camera view"""
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.uv.project_from_view(camera_bounds=True, correct_aspect=True, scale_to_bounds=False)
    bpy.ops.object.mode_set(mode='OBJECT')

def extract_frame_from_video(video_path, frame_number, output_path=None):
    """Extract frame from video (copied from previous artifact)"""
    if output_path is None:
        base_name = os.path.splitext(os.path.basename(video_path))[0]
        output_path = f"/tmp/{base_name}_frame_{frame_number:04d}.png"
    video_image = bpy.data.images.load(video_path)
    video_image.source = 'MOVIE'
    video_image.frame_current = frame_number
    video_image.save_render(output_path)
    return output_path, video_image
# ============ EXAMPLE USAGE ============
if __name__ == "__main__":
    print("📱 Polycam + Frame Texture Integration Ready!")
    print("✨ Features:")
    print("  🔄 Import Polycam scans (.ply, .obj, .fbx, .gltf)")
    print("  🎨 Replace/blend textures with video frames")
    print("  📐 Preserve original UV mapping or create new")
    print("  🎬 Project textures from camera angles")
    print("  🖥️ Create terminal + environment scenes")
    print("")
    print("🚀 Example workflows:")
    print("  polycam_to_frame_textured_object('scan.ply', 'frame.png')")
    print("  create_polycam_terminal_scene('room.obj', ['term1.png', 'term2.png'])")