|
import bpy |
|
import bmesh |
|
import os |
|
from mathutils import Vector, Matrix |
|
import mathutils |
|
|
|
# ============ POLYCAM IMPORT UTILITIES ============ |
|
|
|
def import_polycam_scan(file_path, scan_name="Polycam_Scan"):
    """Import a Polycam scan into the current scene.

    Args:
        file_path: Path to the scan file (.ply, .obj, .fbx, .gltf or .glb).
        scan_name: Name assigned to the first imported object.

    Returns:
        (main_object, imported_objects): the renamed primary object and the
        full list of objects the importer left selected.

    Raises:
        ValueError: If the file extension is not a supported format.
        RuntimeError: If the importer ran but selected no objects.
    """
    ext = os.path.splitext(file_path)[1].lower()

    # Deselect everything so the importer's selection identifies what it added.
    bpy.ops.object.select_all(action='DESELECT')

    # Dispatch to the matching importer.
    # NOTE(review): Blender 4.x renamed several of these operators
    # (e.g. bpy.ops.wm.ply_import / bpy.ops.wm.obj_import) — confirm against
    # the target Blender version.
    if ext == '.ply':
        bpy.ops.import_mesh.ply(filepath=file_path)
    elif ext == '.obj':
        bpy.ops.import_scene.obj(filepath=file_path)
    elif ext == '.fbx':
        bpy.ops.import_scene.fbx(filepath=file_path)
    elif ext in ('.gltf', '.glb'):
        bpy.ops.import_scene.gltf(filepath=file_path)
    else:
        raise ValueError(f"Unsupported Polycam format: {ext}")

    # Importers leave the newly created objects selected.
    imported_objects = list(bpy.context.selected_objects)

    if not imported_objects:
        # RuntimeError (an Exception subclass) keeps `except Exception` callers working.
        raise RuntimeError("Failed to import Polycam scan")

    main_object = imported_objects[0]
    main_object.name = scan_name
    print(f"✅ Imported Polycam scan: {scan_name}")
    print(f"📊 Vertices: {len(main_object.data.vertices)}")
    print(f"📐 Faces: {len(main_object.data.polygons)}")
    return main_object, imported_objects
|
|
|
def optimize_polycam_mesh(polycam_obj, decimate_ratio=0.5, smooth_iterations=2):
    """Optimize Polycam mesh for better performance and texturing

    Cleans the mesh in edit mode (merge near-duplicate vertices, fill holes,
    recalculate normals), then adds non-destructive Decimate and Smooth
    modifiers. The modifiers are added but NOT applied.

    Args:
        polycam_obj: Mesh object to clean up (becomes the active object).
        decimate_ratio: Target face ratio for the Decimate modifier;
            values >= 1.0 skip decimation entirely.
        smooth_iterations: Iteration count for the Smooth modifier;
            0 skips smoothing.

    Returns:
        The same object, with modifiers added.
    """

    # Make the object active and selected so edit-mode operators act on it.
    bpy.context.view_layer.objects.active = polycam_obj

    polycam_obj.select_set(True)

    # Enter edit mode
    bpy.ops.object.mode_set(mode='EDIT')

    # Operate on the entire mesh.
    bpy.ops.mesh.select_all(action='SELECT')

    # Merge vertices closer than 0.001 scene units — scan data is noisy.
    bpy.ops.mesh.remove_doubles(threshold=0.001)

    # Fill holes; sides=0 means holes of any size.
    bpy.ops.mesh.fill_holes(sides=0)

    # Make face normals point consistently outward.
    bpy.ops.mesh.normals_make_consistent(inside=False)

    # Exit edit mode
    bpy.ops.object.mode_set(mode='OBJECT')

    # Decimate only when an actual reduction was requested.
    if decimate_ratio < 1.0:

        decimate_mod = polycam_obj.modifiers.new(name="Decimate", type='DECIMATE')

        decimate_mod.ratio = decimate_ratio

        decimate_mod.use_collapse_triangulate = True

    # Optional smoothing pass (Smooth modifier — not smooth shading).
    if smooth_iterations > 0:

        smooth_mod = polycam_obj.modifiers.new(name="Smooth", type='SMOOTH')

        smooth_mod.iterations = smooth_iterations

    print(f"🔧 Optimized Polycam mesh: {polycam_obj.name}")

    return polycam_obj
|
|
|
# ============ POLYCAM + FRAME TEXTURE WORKFLOWS ============ |
|
|
|
def replace_polycam_texture_with_frame(polycam_obj, frame_image_path, uv_method='preserve'):
    """Replace Polycam's original texture with frame from video/screenshot.

    Builds a fresh Principled-based material that uses the frame image as
    base color (with a subtle bump driven by the same image) and replaces
    all of the object's existing materials with it.

    Args:
        polycam_obj: Mesh object whose materials are replaced.
        frame_image_path: Image file path, or an already-loaded bpy image.
        uv_method: 'preserve' keeps Polycam's UVs, 'auto' smart-projects a
            new layout, 'project' maps from the active camera view. Any
            other value keeps the existing UVs (with a warning).

    Returns:
        The newly created material (already assigned to the object).
    """
    # Accept either a path or an already-loaded image datablock.
    if isinstance(frame_image_path, str):
        frame_image = bpy.data.images.load(frame_image_path)
    else:
        frame_image = frame_image_path

    # Choose the UV mapping strategy.
    if uv_method == 'preserve':
        # Keep existing UV coordinates from Polycam
        print("📐 Preserving original Polycam UV mapping")
    elif uv_method == 'auto':
        # Create new UV mapping
        auto_uv_unwrap_object(polycam_obj)
    elif uv_method == 'project':
        # Project from current camera view
        project_uv_from_active_camera(polycam_obj)
    else:
        # Fix: unknown methods were silently ignored; warn instead.
        print(f"⚠️ Unknown uv_method '{uv_method}', keeping existing UVs")

    # Create new material with frame texture.
    material = bpy.data.materials.new(f"{polycam_obj.name}_Frame_Material")
    material.use_nodes = True
    material.node_tree.nodes.clear()

    nodes = material.node_tree.nodes
    links = material.node_tree.links

    output = nodes.new('ShaderNodeOutputMaterial')
    principled = nodes.new('ShaderNodeBsdfPrincipled')
    tex_image = nodes.new('ShaderNodeTexImage')
    tex_coord = nodes.new('ShaderNodeTexCoord')
    mapping = nodes.new('ShaderNodeMapping')

    # Bump node fakes surface relief from the frame's luminance.
    # (Fix: the original also created a ShaderNodeNormalMap that was never
    # connected — dead node removed.)
    bump = nodes.new('ShaderNodeBump')

    # Set frame image
    tex_image.image = frame_image
    tex_image.interpolation = 'Linear'

    # Material look tuned for photogrammetry scans.
    principled.inputs['Roughness'].default_value = 0.3
    # NOTE(review): the 'Specular' input was renamed in Blender 4.x
    # ('Specular IOR Level') — confirm against the target Blender version.
    principled.inputs['Specular'].default_value = 0.1
    bump.inputs['Strength'].default_value = 0.2  # Subtle surface detail

    # UV -> mapping -> image -> base color.
    links.new(tex_coord.outputs['UV'], mapping.inputs['Vector'])
    links.new(mapping.outputs['Vector'], tex_image.inputs['Vector'])
    links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])

    # Drive bump height from the image color for cheap depth cues.
    links.new(tex_image.outputs['Color'], bump.inputs['Height'])
    links.new(bump.outputs['Normal'], principled.inputs['Normal'])

    links.new(principled.outputs['BSDF'], output.inputs['Surface'])

    # Swap out whatever materials the importer attached.
    polycam_obj.data.materials.clear()
    polycam_obj.data.materials.append(material)

    print(f"🎨 Applied frame texture to Polycam scan: {polycam_obj.name}")
    return material
|
|
|
def blend_polycam_with_frame_texture(polycam_obj, frame_image_path, blend_factor=0.5):
    """Blend original Polycam texture with frame texture.

    Inserts a MixRGB node between the object's existing image texture (if
    any) and the Principled BSDF's base color.

    Args:
        polycam_obj: Mesh object whose first material slot is modified
            (a node material is created if the object has none).
        frame_image_path: Image file path, or an already-loaded bpy image.
        blend_factor: Mix factor; 0.0 = original texture, 1.0 = frame.

    Returns:
        The modified (or newly created) material.
    """
    frame_image = bpy.data.images.load(frame_image_path) if isinstance(frame_image_path, str) else frame_image_path

    # Reuse the first material slot if present, otherwise create one.
    if polycam_obj.data.materials:
        material = polycam_obj.data.materials[0]
        if not material.use_nodes:
            material.use_nodes = True
    else:
        material = bpy.data.materials.new(f"{polycam_obj.name}_Blended_Material")
        material.use_nodes = True
        polycam_obj.data.materials.append(material)

    nodes = material.node_tree.nodes
    links = material.node_tree.links

    # Locate the texture Polycam's importer set up (first image node wins).
    existing_tex = next(
        (node for node in nodes if node.type == 'TEX_IMAGE' and node.image),
        None,
    )

    # Add the frame texture, placed below the existing one when possible.
    frame_tex = nodes.new('ShaderNodeTexImage')
    frame_tex.image = frame_image
    frame_tex.location = (existing_tex.location.x, existing_tex.location.y - 300) if existing_tex else (-300, 0)

    # Mix node blends original (Color1) with the frame (Color2).
    mix_node = nodes.new('ShaderNodeMixRGB')
    mix_node.blend_type = 'MIX'
    mix_node.inputs['Fac'].default_value = blend_factor
    mix_node.location = (0, 0)

    if existing_tex:
        links.new(existing_tex.outputs['Color'], mix_node.inputs['Color1'])
    else:
        # Fix: without an original texture, Color1 was left unconnected and
        # the result blended the frame against flat grey. Feed the frame to
        # both inputs so the material shows the frame regardless of factor.
        print("⚠️ No existing Polycam texture found; using frame texture only")
        links.new(frame_tex.outputs['Color'], mix_node.inputs['Color1'])
    links.new(frame_tex.outputs['Color'], mix_node.inputs['Color2'])

    # Wire the blend into the Principled BSDF's base color if one exists.
    principled = next((node for node in nodes if node.type == 'BSDF_PRINCIPLED'), None)
    if principled:
        links.new(mix_node.outputs['Color'], principled.inputs['Base Color'])

    print(f"🎭 Blended Polycam texture with frame (factor: {blend_factor})")
    return material
|
|
|
def project_frame_onto_polycam_from_camera(polycam_obj, frame_image_path, camera_obj):
    """Project frame texture onto Polycam scan from specific camera angle

    Computes per-loop UVs by transforming each vertex into the camera's
    local space and applying a simple perspective divide, then applies the
    frame image via replace_polycam_texture_with_frame().

    NOTE(review): the projection ignores the camera's focal length, sensor
    size and aspect ratio (camera_data is read but unused), so it is a
    unit-frustum approximation — the mapping will not line up exactly with
    a render from this camera. Confirm whether that is acceptable.

    Args:
        polycam_obj: Mesh object whose UVs are rewritten.
        frame_image_path: Image file path or an already-loaded bpy image.
        camera_obj: Camera object defining the projection viewpoint.

    Returns:
        The material created for the frame texture.
    """

    frame_image = bpy.data.images.load(frame_image_path) if isinstance(frame_image_path, str) else frame_image_path

    # Enter edit mode on the target object to access its bmesh.
    bpy.context.view_layer.objects.active = polycam_obj

    polycam_obj.select_set(True)

    bpy.ops.object.mode_set(mode='EDIT')

    # Get mesh data
    mesh = bmesh.from_edit_mesh(polycam_obj.data)

    # Ensure a UV layer exists (creates one on first run; it becomes active).
    if not mesh.loops.layers.uv:

        mesh.loops.layers.uv.new("Camera_Projection")

    uv_layer = mesh.loops.layers.uv.active

    # Get camera matrices
    camera_matrix = camera_obj.matrix_world

    camera_data = camera_obj.data  # currently unused — see NOTE above

    # Project each face-corner (loop) vertex into the camera frame.
    for face in mesh.faces:

        for loop in face.loops:

            # Object space -> world space.
            vertex_world = polycam_obj.matrix_world @ loop.vert.co

            # World space -> camera-local space.
            vertex_camera = camera_matrix.inverted() @ vertex_world

            # Blender cameras look down their local -Z axis, so z < 0 means
            # the vertex is IN FRONT of the camera (the original comment
            # here said "behind", which was inverted).
            if vertex_camera.z < 0:

                # Simple perspective projection
                screen_x = -vertex_camera.x / vertex_camera.z

                screen_y = -vertex_camera.y / vertex_camera.z

                # Convert to UV coordinates (0-1)
                u = (screen_x + 1) / 2

                v = (screen_y + 1) / 2

                # Clamp to valid range
                u = max(0, min(1, u))

                v = max(0, min(1, v))

                loop[uv_layer].uv = (u, v)

            else:

                loop[uv_layer].uv = (0.5, 0.5)  # Default UV for vertices behind camera

    # Push the bmesh edits back to the mesh datablock.
    bmesh.update_edit_mesh(polycam_obj.data)

    bpy.ops.object.mode_set(mode='OBJECT')

    # Apply frame texture with projection mapping
    material = replace_polycam_texture_with_frame(polycam_obj, frame_image, 'preserve')

    print(f"📹 Projected frame onto Polycam scan from camera: {camera_obj.name}")

    return material
|
|
|
# ============ COMPLETE POLYCAM WORKFLOWS ============ |
|
|
|
def polycam_to_frame_textured_object(polycam_file, frame_source, workflow='replace'):
    """Complete workflow: Import Polycam → Apply frame texture.

    Args:
        polycam_file: Path to the Polycam scan file.
        frame_source: Either an image path / loaded bpy image, or a
            (video_path, frame_number) tuple to extract a video frame.
        workflow: 'replace', 'blend' or 'project'.

    Returns:
        Dict with keys 'polycam_object', 'material', 'frame_image' and
        'all_objects'.

    Raises:
        ValueError: If ``workflow`` is not one of the supported modes.
    """
    # Import Polycam scan
    polycam_obj, all_objects = import_polycam_scan(polycam_file, "Polycam_Scan")

    # Optimize mesh
    polycam_obj = optimize_polycam_mesh(polycam_obj, decimate_ratio=0.7)

    # Resolve the frame source: (video, frame_number) tuple or direct image.
    if isinstance(frame_source, tuple):
        video_path, frame_number = frame_source
        _, frame_image = extract_frame_from_video(video_path, frame_number)
    else:
        frame_image = frame_source

    # Apply texture based on workflow.
    if workflow == 'replace':
        material = replace_polycam_texture_with_frame(polycam_obj, frame_image, 'preserve')
    elif workflow == 'blend':
        material = blend_polycam_with_frame_texture(polycam_obj, frame_image, 0.6)
    elif workflow == 'project':
        # Create a camera to project from.
        bpy.ops.object.camera_add(location=(0, -5, 2))
        camera = bpy.context.object
        material = project_frame_onto_polycam_from_camera(polycam_obj, frame_image, camera)
    else:
        # Fix: an unknown workflow previously fell through and crashed with
        # an opaque NameError ('material' unbound) at the return statement.
        raise ValueError(f"Unknown workflow: {workflow}")

    return {
        'polycam_object': polycam_obj,
        'material': material,
        'frame_image': frame_image,
        'all_objects': all_objects
    }
|
|
|
def create_polycam_terminal_scene(polycam_scan_path, terminal_screenshots):
    """Create scene with Polycam-scanned environment + terminal screenshots

    Imports and optimizes the environment scan, lays out one emissive
    screenshot plane per image along the X axis, then adds an area key
    light and a scene camera.
    """

    def _emissive_screen_material(index, image):
        # Minimal emission shader that makes a plane glow with the screenshot.
        mat = bpy.data.materials.new(f"Terminal_Screen_Mat_{index:02d}")
        mat.use_nodes = True
        mat.node_tree.nodes.clear()

        node_set = mat.node_tree.nodes
        link_set = mat.node_tree.links

        out_node = node_set.new('ShaderNodeOutputMaterial')
        emit_node = node_set.new('ShaderNodeEmission')
        img_node = node_set.new('ShaderNodeTexImage')
        img_node.image = image

        emit_node.inputs['Strength'].default_value = 3.0

        link_set.new(img_node.outputs['Color'], emit_node.inputs['Color'])
        link_set.new(emit_node.outputs['Emission'], out_node.inputs['Surface'])
        return mat

    # Environment: import, simplify, then place and scale it.
    env_obj, env_objects = import_polycam_scan(polycam_scan_path, "Environment_Scan")
    env_obj = optimize_polycam_mesh(env_obj, decimate_ratio=0.5)
    env_obj.location = (0, 0, -1)
    env_obj.scale = (2, 2, 2)

    # One glowing plane per screenshot, spread along X.
    screen_count = len(terminal_screenshots)
    terminal_objects = []
    for idx, shot_path in enumerate(terminal_screenshots):
        bpy.ops.mesh.primitive_plane_add(
            size=3,
            location=(idx * 2 - screen_count, 0, 1)
        )
        plane = bpy.context.object
        plane.name = f"Terminal_Screen_{idx:02d}"

        shot_image = bpy.data.images.load(shot_path)
        plane.data.materials.append(_emissive_screen_material(idx, shot_image))
        terminal_objects.append(plane)

    # Dramatic key light above and in front of the scene.
    bpy.ops.object.light_add(type='AREA', location=(0, -4, 6))
    key_light = bpy.context.object
    key_light.data.energy = 15
    key_light.data.size = 8

    # Scene camera looking in from -Y.
    bpy.ops.object.camera_add(location=(0, -8, 3))
    camera = bpy.context.object
    bpy.context.scene.camera = camera

    return {
        'environment': env_obj,
        'terminals': terminal_objects,
        'camera': camera,
        'lighting': [key_light]
    }
|
|
|
# ============ POLYCAM UTILITIES ============ |
|
|
|
def auto_uv_unwrap_object(obj):
    """Auto UV unwrap for Polycam objects

    Smart-projects a new UV layout over the whole mesh (replacing the
    active UV map's coordinates) and leaves the object in object mode.
    """
    # Make the object active and selected so edit-mode operators target it.
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    # NOTE(review): since Blender 2.91 smart_project takes angle_limit in
    # radians — a value of 66 may not behave as "66 degrees"; confirm.
    bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02)
    bpy.ops.object.mode_set(mode='OBJECT')
|
|
|
def project_uv_from_active_camera(obj):
    """Project UV from active camera view

    Maps the whole mesh's UVs using Blender's project-from-view operator
    (bounded to the camera frame, aspect-corrected), then returns to
    object mode.

    NOTE(review): bpy.ops.uv.project_from_view is view-dependent; when run
    without a 3D viewport context (e.g. headless/background mode) it may
    fail — confirm the execution context.
    """
    # Make the object active and selected so edit-mode operators target it.
    bpy.context.view_layer.objects.active = obj
    obj.select_set(True)
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.uv.project_from_view(camera_bounds=True, correct_aspect=True, scale_to_bounds=False)
    bpy.ops.object.mode_set(mode='OBJECT')
|
|
|
def extract_frame_from_video(video_path, frame_number, output_path=None):
    """Extract a single frame from a video file via Blender's image system.

    Args:
        video_path: Path to the source video.
        frame_number: Frame index to grab.
        output_path: Destination PNG path; defaults to a file in the
            system temporary directory.

    Returns:
        (output_path, video_image): the saved frame path and the loaded
        movie datablock.
    """
    if output_path is None:
        import tempfile  # local import: only needed for the default path
        base_name = os.path.splitext(os.path.basename(video_path))[0]
        # Fix: was hard-coded to /tmp/, which does not exist on Windows.
        output_path = os.path.join(
            tempfile.gettempdir(), f"{base_name}_frame_{frame_number:04d}.png"
        )

    video_image = bpy.data.images.load(video_path)
    video_image.source = 'MOVIE'
    # NOTE(review): relying on Image.frame_current to select which frame
    # save_render() writes is fragile — confirm the exported PNG actually
    # contains the requested frame.
    video_image.frame_current = frame_number
    video_image.save_render(output_path)

    return output_path, video_image
|
|
|
# ============ EXAMPLE USAGE ============ |
|
|
|
if __name__ == "__main__":
    # Informational banner only — importing this module runs no Blender ops.
    print("📱 Polycam + Frame Texture Integration Ready!")
    print("✨ Features:")
    print("   🔄 Import Polycam scans (.ply, .obj, .fbx, .gltf)")
    print("   🎨 Replace/blend textures with video frames")
    print("   📐 Preserve original UV mapping or create new")
    print("   🎬 Project textures from camera angles")
    print("   🖥️ Create terminal + environment scenes")
    print("")
    print("🚀 Example workflows:")
    print("   polycam_to_frame_textured_object('scan.ply', 'frame.png')")
    print("   create_polycam_terminal_scene('room.obj', ['term1.png', 'term2.png'])")