Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save Cdaprod/1702d2defa836bf95c7b24099216a704 to your computer and use it in GitHub Desktop.
Save Cdaprod/1702d2defa836bf95c7b24099216a704 to your computer and use it in GitHub Desktop.
Blender BPY — Texture Mapping & Raycasting With iPhone Screenshots and Recordings
import bpy
import bmesh
import os
from mathutils import Vector
from bpy_extras.object_utils import world_to_camera_view
# ============ MEDIA LOADING & VALIDATION ============
def validate_media_file(file_path):
    """Validate that *file_path* exists and classify its media type.

    Args:
        file_path: Path to an image or video file on disk.

    Returns:
        'IMAGE' or 'VIDEO', decided by the (case-insensitive) extension.

    Raises:
        FileNotFoundError: If the file does not exist.
        ValueError: If the extension is not a supported image/video format.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Media file not found: {file_path}")

    ext = os.path.splitext(file_path)[1].lower()

    # Still-image formats Blender can load directly.
    image_exts = {'.png', '.jpg', '.jpeg', '.tiff', '.tga', '.bmp', '.exr', '.hdr'}
    # Video containers supported by Blender's movie loader.
    video_exts = {'.mp4', '.mov', '.avi', '.mkv', '.webm', '.m4v'}

    if ext in image_exts:
        return 'IMAGE'
    if ext in video_exts:
        return 'VIDEO'
    raise ValueError(f"Unsupported media format: {ext}")
def load_media_texture(media_path, texture_name=None):
    """Load an image or video into ``bpy.data.images``, reusing by name.

    Args:
        media_path: Path to the media file (validated before loading).
        texture_name: Datablock name; defaults to the file's basename.

    Returns:
        Tuple of (image datablock, media type string 'IMAGE' or 'VIDEO').

    Raises:
        FileNotFoundError / ValueError: Propagated from validate_media_file.
    """
    if texture_name is None:
        texture_name = os.path.basename(media_path)

    media_type = validate_media_file(media_path)

    if texture_name not in bpy.data.images:
        image = bpy.data.images.load(media_path)
        image.name = texture_name
        if media_type == 'VIDEO':
            # Mark as a movie so the texture advances with the timeline.
            image.source = 'MOVIE'
    else:
        # Reuse the existing datablock rather than loading a duplicate.
        image = bpy.data.images[texture_name]

    return image, media_type
# ============ IPHONE-SPECIFIC CONFIGURATIONS ============
def get_iphone_aspect_ratios():
    """Return native screen resolutions as (width, height) per iPhone model.

    Keys are model identifiers; 'standard' is a generic 9:16 vertical
    fallback used when a specific model is not known.
    """
    return {
        'iphone_14_pro': (1179, 2556),  # iPhone 14 Pro
        'iphone_14': (1170, 2532),      # iPhone 14
        'iphone_13': (1170, 2532),      # iPhone 13
        'iphone_12': (1170, 2532),      # iPhone 12
        'iphone_se': (750, 1334),       # iPhone SE
        'standard': (1080, 1920),       # Generic vertical
    }
def calculate_plane_scale_for_iphone(image, iphone_model='standard'):
    """Compute an (x, y, z) plane scale matching an iPhone screen ratio.

    Args:
        image: Unused; kept for interface compatibility — callers pass the
            loaded image datablock (or None).
        iphone_model: Key into get_iphone_aspect_ratios(); unknown models
            fall back to 'standard'.

    Returns:
        Tuple (width, height, 1.0) suitable for assigning to ``obj.scale``.
    """
    ratios = get_iphone_aspect_ratios()
    target_width, target_height = ratios.get(iphone_model, ratios['standard'])

    # Portrait aspect: height / width (> 1 for phone screens).
    aspect_ratio = target_height / target_width

    base_width = 3.0  # scene-level base size; adjust per scene
    base_height = base_width * aspect_ratio
    return (base_width, base_height, 1.0)
# ============ SCREENSHOT TEXTURE METHODS V2 ============
def create_background_plane_with_media(media_path, plane_name="Media_Plane", iphone_model='standard'):
    """Create a plane textured with a screenshot or video, sized like an iPhone.

    Args:
        media_path: Path to the screenshot or video file.
        plane_name: Name for the new plane object (and its material prefix).
        iphone_model: Model key controlling the plane's aspect ratio.

    Returns:
        Tuple (plane object, material, image datablock, media type string).
    """
    media, media_type = load_media_texture(media_path)

    # Slightly behind the origin so overlays can sit in front of it.
    bpy.ops.mesh.primitive_plane_add(size=2, location=(0, 0, -0.1))
    plane = bpy.context.object
    plane.name = plane_name
    plane.scale = calculate_plane_scale_for_iphone(media, iphone_model)

    # Build a node-based material from scratch.
    material = bpy.data.materials.new(name=f"{plane_name}_Material")
    material.use_nodes = True
    material.node_tree.nodes.clear()
    nodes = material.node_tree.nodes
    links = material.node_tree.links

    output = nodes.new('ShaderNodeOutputMaterial')
    tex_image = nodes.new('ShaderNodeTexImage')
    tex_image.image = media

    if media_type == 'VIDEO':
        tex_image.image.source = 'MOVIE'
        tex_image.image_user.use_auto_refresh = True
        tex_image.image_user.frame_duration = 250  # adjust for video length

    principled = nodes.new('ShaderNodeBsdfPrincipled')
    principled.inputs['Roughness'].default_value = 0.0       # perfect screen surface
    principled.inputs['Emission Strength'].default_value = 1.0  # screen glow

    links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])
    # NOTE(review): the 'Emission' socket was renamed 'Emission Color' in
    # Blender 4.0 — confirm against the target Blender version.
    links.new(tex_image.outputs['Color'], principled.inputs['Emission'])
    links.new(principled.outputs['BSDF'], output.inputs['Surface'])

    plane.data.materials.append(material)
    return plane, material, media, media_type
def create_layered_media_setup(media_paths, layer_spacing=0.2, iphone_model='standard'):
    """Create stacked emission-textured planes, one per media file.

    Each path gets its own plane at depth ``-i * layer_spacing``; layers that
    fail to load are skipped with a warning rather than aborting the setup.

    Args:
        media_paths: Iterable of image/video file paths, front to back.
        layer_spacing: Z distance between consecutive layers.
        iphone_model: Model key controlling each plane's aspect ratio.

    Returns:
        List of dicts with keys 'plane', 'material', 'media', 'type', 'depth'.
    """
    layers = []

    for i, media_path in enumerate(media_paths):
        try:
            bpy.ops.mesh.primitive_plane_add(
                size=2,
                location=(0, 0, -i * layer_spacing)
            )
            plane = bpy.context.object
            plane.name = f"Media_Layer_{i:02d}"
            plane.scale = calculate_plane_scale_for_iphone(None, iphone_model)

            media, media_type = load_media_texture(media_path, f"Layer_{i:02d}_Media")

            # Pure-emission material so each layer reads like a lit screen.
            material = bpy.data.materials.new(name=f"Media_Mat_{i:02d}")
            material.use_nodes = True
            material.node_tree.nodes.clear()
            nodes = material.node_tree.nodes
            links = material.node_tree.links

            output = nodes.new('ShaderNodeOutputMaterial')
            emission = nodes.new('ShaderNodeEmission')
            tex_image = nodes.new('ShaderNodeTexImage')
            tex_image.image = media

            if media_type == 'VIDEO':
                tex_image.image.source = 'MOVIE'
                tex_image.image_user.use_auto_refresh = True
                # Stagger video start times so layers don't play in lockstep.
                tex_image.image_user.frame_offset = i * 10

            # Closer layers glow brighter; clamp so deep layers stay visible.
            emission_strength = 3.0 - (i * 0.4)
            emission.inputs['Strength'].default_value = max(0.3, emission_strength)

            links.new(tex_image.outputs['Color'], emission.inputs['Color'])
            links.new(emission.outputs['Emission'], output.inputs['Surface'])

            plane.data.materials.append(material)

            layers.append({
                'plane': plane,
                'material': material,
                'media': media,
                'type': media_type,
                'depth': i
            })
        except Exception as e:
            # Best-effort: report and move on to the next layer.
            print(f"⚠️ Failed to load layer {i}: {e}")
            continue

    return layers
def create_animated_screen_material(video_path, material_name="Animated_Screen"):
    """Create a screen-like material: emissive video content mixed with a
    fresnel-driven glossy reflection (glass front surface).

    Args:
        video_path: Path to a video file (a still image also works, with a
            warning, producing a static screen).
        material_name: Name for the new material datablock.

    Returns:
        Tuple (material, image datablock).
    """
    media, media_type = load_media_texture(video_path)

    if media_type != 'VIDEO':
        print("⚠️ Warning: Not a video file, creating static material")

    material = bpy.data.materials.new(name=material_name)
    material.use_nodes = True
    material.node_tree.nodes.clear()
    nodes = material.node_tree.nodes
    links = material.node_tree.links

    output = nodes.new('ShaderNodeOutputMaterial')
    mix_shader = nodes.new('ShaderNodeMixShader')

    # Screen emission (the video content).
    emission = nodes.new('ShaderNodeEmission')
    tex_image = nodes.new('ShaderNodeTexImage')
    tex_image.image = media

    # Glass front-surface reflection.
    glossy = nodes.new('ShaderNodeBsdfGlossy')
    glossy.inputs['Roughness'].default_value = 0.1

    # Fresnel: more reflection at grazing angles, like real screen glass.
    fresnel = nodes.new('ShaderNodeFresnel')
    fresnel.inputs['IOR'].default_value = 1.45  # typical glass IOR

    if media_type == 'VIDEO':
        media.source = 'MOVIE'
        tex_image.image_user.use_auto_refresh = True
        tex_image.image_user.use_cyclic = True  # loop video

    links.new(tex_image.outputs['Color'], emission.inputs['Color'])
    emission.inputs['Strength'].default_value = 2.0
    links.new(fresnel.outputs['Fac'], mix_shader.inputs['Fac'])
    # BUGFIX: the Mix Shader has TWO inputs named 'Shader'; indexing by name
    # resolves to the first one both times, so the glossy link used to
    # replace the emission link and the screen content was never mixed in.
    # Use socket indices: [1] = bottom slot (Fac=0), [2] = top slot (Fac=1).
    links.new(emission.outputs['Emission'], mix_shader.inputs[1])
    links.new(glossy.outputs['BSDF'], mix_shader.inputs[2])
    links.new(mix_shader.outputs['Shader'], output.inputs['Surface'])

    return material, media
# ============ ENHANCED RAYCASTING V2 ============
def advanced_raycast_from_media_pixels(media_plane, camera, resolution=(100, 100)):
    """Raycast from the camera through a grid of points on a media plane.

    Samples a ``resolution[0] x resolution[1]`` UV grid on the plane, casts a
    ray from the camera through each sample, and records scene hits that land
    on objects other than the plane itself.

    Args:
        media_plane: Plane object carrying an image-texture material.
        camera: Camera object used as the ray origin.
        resolution: (columns, rows) of the sampling grid.

    Returns:
        List of dicts with 'uv', 'pixel', 'world_pos', 'hit_location',
        'hit_normal', 'hit_object', 'distance'; empty list when the plane
        has no usable image material.
    """
    plane_obj = media_plane
    material = plane_obj.data.materials[0] if plane_obj.data.materials else None

    if not material or not material.use_nodes:
        print("❌ Media plane has no proper material for raycasting")
        return []

    # Find the first image-texture node that actually has an image assigned.
    image_node = None
    for node in material.node_tree.nodes:
        if node.type == 'TEX_IMAGE' and node.image:
            image_node = node
            break

    if not image_node:
        print("❌ No image texture found in material")
        return []

    image = image_node.image
    img_width, img_height = image.size

    raycast_results = []
    # Guard against division by zero when an axis has a single sample.
    u_div = max(resolution[0] - 1, 1)
    v_div = max(resolution[1] - 1, 1)

    for y in range(resolution[1]):
        for x in range(resolution[0]):
            # UV coordinates in the 0-1 range.
            u = x / u_div
            v = y / v_div

            # Map UV onto the plane's local -1..1 square, then to world space.
            # Assumes the plane mesh is the default 2x2 primitive at origin.
            plane_local = Vector(((u - 0.5) * 2, (v - 0.5) * 2, 0))
            world_pos = plane_obj.matrix_world @ plane_local

            cam_pos = camera.matrix_world.translation
            ray_direction = (world_pos - cam_pos).normalized()

            hit, hit_location, hit_normal, hit_index, hit_object = bpy.context.scene.ray_cast(
                bpy.context.view_layer.depsgraph,
                cam_pos,
                ray_direction,
                distance=1000.0
            )

            # Ignore hits on the plane itself; we want what lies beyond it.
            if hit and hit_object != plane_obj:
                pixel_x = int(u * img_width)
                pixel_y = int((1 - v) * img_height)  # flip Y for image coords

                raycast_results.append({
                    'uv': (u, v),
                    'pixel': (pixel_x, pixel_y),
                    'world_pos': world_pos.copy(),
                    'hit_location': hit_location.copy(),
                    'hit_normal': hit_normal.copy(),
                    'hit_object': hit_object,
                    'distance': (hit_location - cam_pos).length
                })

    return raycast_results
def create_interactive_hotspots_from_raycast(raycast_results, hotspot_threshold=10):
    """Cluster raycast hits and place a glowing sphere at each cluster center.

    Greedy clustering: each unprocessed hit seeds a cluster and absorbs all
    later hits within ``hotspot_threshold`` world units. Clusters with fewer
    than 3 members are discarded.

    Args:
        raycast_results: Output of advanced_raycast_from_media_pixels().
        hotspot_threshold: Max distance (world units) between cluster members.

    Returns:
        List of dicts with 'object', 'center', 'cluster_size', 'cluster_data'.
    """
    hotspots = []
    processed = set()

    for i, result in enumerate(raycast_results):
        if i in processed:
            continue

        # Seed a new cluster and greedily absorb nearby later hits.
        cluster = [result]
        processed.add(i)

        for j, other_result in enumerate(raycast_results[i+1:], i+1):
            if j in processed:
                continue
            distance = (result['world_pos'] - other_result['world_pos']).length
            if distance < hotspot_threshold:
                cluster.append(other_result)
                processed.add(j)

        if len(cluster) >= 3:  # minimum cluster size
            # Centroid of the cluster's world positions.
            center = Vector((0, 0, 0))
            for hit in cluster:
                center += hit['world_pos']
            center /= len(cluster)

            bpy.ops.mesh.primitive_uv_sphere_add(
                radius=0.05,
                location=center
            )
            hotspot_obj = bpy.context.object
            hotspot_obj.name = f"Hotspot_{len(hotspots):02d}"

            # Emissive orange material so hotspots are easy to spot.
            hotspot_mat = bpy.data.materials.new(f"Hotspot_Mat_{len(hotspots):02d}")
            hotspot_mat.use_nodes = True
            hotspot_mat.node_tree.nodes.clear()
            nodes = hotspot_mat.node_tree.nodes

            emission = nodes.new('ShaderNodeEmission')
            output = nodes.new('ShaderNodeOutputMaterial')
            emission.inputs['Color'].default_value = (1.0, 0.3, 0.1, 1.0)  # orange glow
            emission.inputs['Strength'].default_value = 5.0
            hotspot_mat.node_tree.links.new(emission.outputs[0], output.inputs[0])

            hotspot_obj.data.materials.append(hotspot_mat)

            hotspots.append({
                'object': hotspot_obj,
                'center': center,
                'cluster_size': len(cluster),
                'cluster_data': cluster
            })

    return hotspots
# ============ COMPLETE SETUP FUNCTIONS ============
def setup_iphone_media_scene(media_paths, iphone_model='iphone_14'):
    """Build a complete scene: layered media planes, camera, light, renderer.

    WARNING: deletes every object currently in the scene before building.

    Args:
        media_paths: Paths for create_layered_media_setup (front to back).
        iphone_model: Model key for aspect ratio and render resolution;
            unknown models fall back to 'standard'.

    Returns:
        Dict with 'layers', 'camera', 'target', 'lighting'.
    """
    # Clear scene — destructive by design.
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete(use_global=False)

    print(f"📱 Setting up iPhone {iphone_model} media scene...")

    layers = create_layered_media_setup(media_paths, 0.3, iphone_model)
    print(f"📸 Created {len(layers)} media layers")

    # Camera tuned for viewing vertical phone content.
    bpy.ops.object.camera_add(location=(0, -8, 4))
    camera = bpy.context.object
    camera.name = "iPhone_Camera"
    camera.data.lens = 50  # good for iPhone perspective
    camera.data.sensor_width = 36

    # Empty at the middle of the layer stack for the camera to track.
    bpy.ops.object.empty_add(location=(0, 0, -len(layers) * 0.15))
    target = bpy.context.object
    target.name = "Camera_Target"

    track_constraint = camera.constraints.new('TRACK_TO')
    track_constraint.target = target
    track_constraint.track_axis = 'TRACK_NEGATIVE_Z'
    track_constraint.up_axis = 'UP_Y'

    bpy.context.scene.camera = camera

    # Single area key light suits emissive screen planes.
    bpy.ops.object.light_add(type='AREA', location=(2, -5, 6))
    key_light = bpy.context.object
    key_light.data.energy = 10
    key_light.data.size = 4
    key_light.data.color = (1.0, 1.0, 1.0)

    # Render settings.
    scene = bpy.context.scene
    scene.render.engine = 'CYCLES'
    scene.cycles.samples = 128
    scene.render.film_transparent = True

    # Render at half the device resolution to keep render times reasonable.
    # BUGFIX: fall back to 'standard' for unknown models instead of raising
    # KeyError (consistent with calculate_plane_scale_for_iphone).
    ratios = get_iphone_aspect_ratios()
    width, height = ratios.get(iphone_model, ratios['standard'])
    scene.render.resolution_x = width // 2
    scene.render.resolution_y = height // 2

    return {
        'layers': layers,
        'camera': camera,
        'target': target,
        'lighting': [key_light]
    }
def create_interactive_iphone_scene(screenshot_path, video_paths):
    """Build a screenshot background with video overlays plus raycast hotspots.

    Args:
        screenshot_path: Path of the background screenshot image.
        video_paths: Paths of videos to overlay in front of the background.

    Returns:
        Dict with 'background', 'overlays', 'camera', 'hotspots',
        'raycast_data' ('hotspots'/'raycast_data' are empty lists when the
        raycast finds nothing).
    """
    # Main screenshot as background.
    bg_plane, bg_mat, bg_media, bg_type = create_background_plane_with_media(
        screenshot_path, "iPhone_Screenshot", 'iphone_14'
    )
    bg_plane.location.z = -1.0

    # Overlay videos fanned out in front of the background.
    video_overlays = []
    for i, video_path in enumerate(video_paths):
        overlay_plane, overlay_mat, overlay_media, overlay_type = create_background_plane_with_media(
            video_path, f"Video_Overlay_{i}", 'iphone_14'
        )
        overlay_plane.location.z = -0.5 + (i * 0.1)
        overlay_plane.location.x = (i - len(video_paths) / 2) * 0.5
        overlay_plane.scale = (0.8, 0.8, 1.0)  # smaller overlays
        video_overlays.append(overlay_plane)

    bpy.ops.object.camera_add(location=(0, -6, 2))
    camera = bpy.context.object
    bpy.context.scene.camera = camera

    print("🔍 Performing raycast analysis...")
    raycast_results = advanced_raycast_from_media_pixels(bg_plane, camera, (50, 88))

    # Hotspots only exist when the raycast found something.
    hotspots = []
    if raycast_results:
        hotspots = create_interactive_hotspots_from_raycast(raycast_results)
        print(f"🎯 Created {len(hotspots)} interactive hotspots")

    return {
        'background': bg_plane,
        'overlays': video_overlays,
        'camera': camera,
        'hotspots': hotspots,
        'raycast_data': raycast_results
    }
# ============ EXAMPLE USAGE ============
if __name__ == "__main__":
    # Example paths — replace with your actual files before running.
    SCREENSHOT_PATHS = [
        "/path/to/terminal_screenshot_1.png",
        "/path/to/terminal_screenshot_2.png",
    ]
    VIDEO_PATHS = [
        "/path/to/terminal_recording_1.mp4",
        "/path/to/terminal_recording_2.mov",
    ]

    # Method 1: Simple layered setup
    # scene_data = setup_iphone_media_scene(SCREENSHOT_PATHS + VIDEO_PATHS)

    # Method 2: Interactive scene with raycasting
    # if SCREENSHOT_PATHS and VIDEO_PATHS:
    #     interactive_scene = create_interactive_iphone_scene(
    #         SCREENSHOT_PATHS[0],
    #         VIDEO_PATHS
    #     )

    print("📱 iPhone Screenshot & Video Texture System V2 Ready!")
    print("🎬 Supports both images and videos with auto-detection")
    print("📐 iPhone aspect ratio optimization")
    print("🔍 Advanced raycasting with interactive hotspots")
    print("💡 Realistic screen materials with emission and reflection")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment