@kitlith
Last active April 23, 2025 03:24
Lit Screenspace Decal using `_CameraDepthNormalsTexture` for smooth normals, cobbled together from a bunch of pieces that I should go credit at some point.

World Setup

To make this work in the Main Camera:

  • Set up an Ambient Occlusion post-processing pass
    • Mode: Scalable Ambient Occlusion
    • Intensity: nonzero. (Not 100% sure this is required; maybe it's only needed during setup and you can remove it afterward?)
  • In the latest open beta (1622), you can set the CameraDepthMode on the main camera directly!
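A minimal UdonSharp sketch of that last option. The VRC.SDK3.Rendering.VRCCameraSettings class, its ScreenCamera property, and its DepthTextureMode property are my assumptions about the open-beta camera settings API (as is the class name MainCameraDepthNormal), so check the SDK docs for the exact names:

using UdonSharp;
using UnityEngine;
using VRC.SDK3.Rendering;

[UdonBehaviourSyncMode(BehaviourSyncMode.None)]
public class MainCameraDepthNormal : UdonSharpBehaviour
{
    void Start()
    {
        // Assumed beta API: OR the DepthNormals flag into the screen camera's
        // depth texture mode, preserving whatever modes are already set.
        VRCCameraSettings.ScreenCamera.DepthTextureMode =
            (DepthTextureMode)((int)VRCCameraSettings.ScreenCamera.DepthTextureMode
                | (int)DepthTextureMode.DepthNormals);
    }
}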

To make this work in World Mirrors:

  • Add the MirrorDepthNormal UdonBehaviour to your world's mirrors (or something that does the equivalent; see the sketch below).
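If you'd rather roll your own equivalent, the core of the script further down is just setting the DepthNormals flag on the camera VRChat spawns for the mirror. A sketch, assuming a mirror GameObject named "MyMirror" (VRChat names the camera object "/MirrorCam" plus the mirror's name, which is the convention MirrorDepthNormal relies on):

GameObject camObj = GameObject.Find("/MirrorCamMyMirror");
if (camObj)
{
    Camera mirrorCam = camObj.GetComponent<Camera>();
    mirrorCam.depthTextureMode =
        (DepthTextureMode)((int)mirrorCam.depthTextureMode | (int)DepthTextureMode.DepthNormals);
}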

To make this work in the Personal Mirror:

  • Add the MirrorDepthNormal UdonBehaviour to an empty object in your scene, and set the MirrorCamPath public variable to "/MirrorCamMirror"

I couldn't come up with a way to make it work in the Face Mirror, but that largely shouldn't matter unless you're wearing an avatar that uses a shader that requires the DepthNormal texture.

Cameras that players spawn seem to inherit something from the Main Camera (whether that's the DepthNormal flag, post-processing settings, or something else), so I didn't do anything special for those.

lighting.cginc


#pragma once
#include "UnityPBSLighting.cginc"
#include "AutoLight.cginc"
struct appdata
{
float4 vertex : POSITION;
//float2 uv : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
float4 _Tint;
float _Metallic;
float _Smoothness;
float3 _Emission;
float _UseDepthNormals;
struct v2f
{
float4 vertex : SV_Position;
float4 clipPos : TEXCOORD0;
float3 worldPos: POSITIONT;
float2 screenPosition: TEXCOORD1;
// TODO: screenPos?
//SHADOW_COORDS(1)
nointerpolation float4x4 inverseVP : IVP;
UNITY_VERTEX_OUTPUT_STEREO
};
// TODO: consider doing a per-sample computation.
//UNITY_DECLARE_DEPTH_TEXTURE_MS(_CameraDepthNormalsTexture);
UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);
float4 _CameraDepthTexture_TexelSize;
UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthNormalsTexture);
float4 _CameraDepthNormalsTexture_TexelSize;
#define MY_SAMPLE_RAW_DEPTH_TEXTURE SAMPLE_RAW_DEPTH_TEXTURE
#define MY_SAMPLE_RAW_DEPTH_TEXTURE_PROJ SAMPLE_RAW_DEPTH_TEXTURE_PROJ
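// General 4x4 inverse via the adjugate (cofactor) method: build the 2x2
// sub-determinants, assemble the adjugate matrix, and scale by 1/determinant.
// Used to invert UNITY_MATRIX_VP per-vertex, since the built-in pipeline
// doesn't expose an inverse view-projection matrix to shaders.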
float4x4 inverse(float4x4 mat)
{
float4x4 M = transpose(mat);
float m01xy = M[0].x * M[1].y - M[0].y * M[1].x;
float m01xz = M[0].x * M[1].z - M[0].z * M[1].x;
float m01xw = M[0].x * M[1].w - M[0].w * M[1].x;
float m01yz = M[0].y * M[1].z - M[0].z * M[1].y;
float m01yw = M[0].y * M[1].w - M[0].w * M[1].y;
float m01zw = M[0].z * M[1].w - M[0].w * M[1].z;
float m23xy = M[2].x * M[3].y - M[2].y * M[3].x;
float m23xz = M[2].x * M[3].z - M[2].z * M[3].x;
float m23xw = M[2].x * M[3].w - M[2].w * M[3].x;
float m23yz = M[2].y * M[3].z - M[2].z * M[3].y;
float m23yw = M[2].y * M[3].w - M[2].w * M[3].y;
float m23zw = M[2].z * M[3].w - M[2].w * M[3].z;
float4 adjM0, adjM1, adjM2, adjM3;
adjM0.x =+ dot(M[1].yzw, float3(m23zw, - m23yw, m23yz));
adjM0.y =- dot(M[0].yzw, float3(m23zw, - m23yw, m23yz));
adjM0.z =+ dot(M[3].yzw, float3(m01zw, - m01yw, m01yz));
adjM0.w =- dot(M[2].yzw, float3(m01zw, - m01yw, m01yz));
adjM1.x =- dot(M[1].xzw, float3(m23zw, - m23xw, m23xz));
adjM1.y =+ dot(M[0].xzw, float3(m23zw, - m23xw, m23xz));
adjM1.z =- dot(M[3].xzw, float3(m01zw, - m01xw, m01xz));
adjM1.w =+ dot(M[2].xzw, float3(m01zw, - m01xw, m01xz));
adjM2.x =+ dot(M[1].xyw, float3(m23yw, - m23xw, m23xy));
adjM2.y =- dot(M[0].xyw, float3(m23yw, - m23xw, m23xy));
adjM2.z =+ dot(M[3].xyw, float3(m01yw, - m01xw, m01xy));
adjM2.w =- dot(M[2].xyw, float3(m01yw, - m01xw, m01xy));
adjM3.x =- dot(M[1].xyz, float3(m23yz, - m23xz, m23xy));
adjM3.y =+ dot(M[0].xyz, float3(m23yz, - m23xz, m23xy));
adjM3.z =- dot(M[3].xyz, float3(m01yz, - m01xz, m01xy));
adjM3.w =+ dot(M[2].xyz, float3(m01yz, - m01xz, m01xy));
float invDet = rcp(dot(M[0].xyzw, float4(adjM0.x, adjM1.x, adjM2.x, adjM3.x)));
return transpose(float4x4(adjM0 * invDet, adjM1 * invDet, adjM2 * invDet, adjM3 * invDet));
}
v2f vert (appdata v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_OUTPUT(v2f, o);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
float4 vertexClip = UnityObjectToClipPos(v.vertex);
o.vertex = vertexClip;
o.clipPos = vertexClip; // sure would be nice if i could use o.vertex for this in the fragment shader instead.
o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
//TRANSFER_SHADOW(o);
o.inverseVP = inverse(UNITY_MATRIX_VP);
// Save the clip space position so we can use it later.
// This also handles situations where the Y is flipped.
float2 suv = o.vertex.xy * float2( 0.5, 0.5*_ProjectionParams.x);
// Tricky, constants like the 0.5 and the second parameter
// need to be premultiplied by o.vertex.w.
o.screenPosition = TransformStereoScreenSpaceTex( suv+0.5*o.vertex.w, o.vertex.w );
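// To recover [0,1] screen UVs in the fragment shader, divide by w:
// (suv + 0.5*w) / w == ndc.xy * 0.5 + 0.5, since ndc.xy == clip.xy / w.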
return o;
}
// from https://github.com/cnlohr/shadertrixx
// Inspired by the Internal-ScreenSpaceShadows implementation. This was adapted by lyuma.
// This code can be found on google if you search for "computeCameraSpacePosFromDepthAndInvProjMat"
// Note: The output of this will still need to be adjusted. It is NOT in world space units.
float GetLinearZFromZDepth_WorksWithMirrors(float zDepthFromMap, float2 screenUV)
{
#if defined(UNITY_REVERSED_Z)
zDepthFromMap = 1 - zDepthFromMap;
// When using a mirror, the far plane is whack. This just checks for it and aborts.
if( zDepthFromMap >= 1.0 ) return _ProjectionParams.z;
#endif
float4 clipPos = float4(screenUV.xy, zDepthFromMap, 1.0);
clipPos.xyz = 2.0f * clipPos.xyz - 1.0f;
float4 camPos = mul(unity_CameraInvProjection, clipPos);
return -camPos.z / camPos.w;
}
float3 worldSpaceDirection;
float perspectiveFactor;
float _VRChatMirrorMode;
float getRawDepth(float2 uv) { return SAMPLE_DEPTH_TEXTURE_LOD(_CameraDepthTexture, float4(uv, 0.0, 0.0)); }
// inspired by keijiro's depth inverse projection
// https://github.com/keijiro/DepthInverseProjection
// constructs view space ray at the far clip plane from the screen uv
// then multiplies that ray by the linear 01 depth
float3 viewSpacePosAtScreenUV(float2 uv)
{
float rawDepth = getRawDepth(uv);
UNITY_BRANCH // uniform control flow
if (_VRChatMirrorMode != 0) {
// TODO: figure out why this causes stereo disparity outside of the mirror???
float eyeDepthWorld =
GetLinearZFromZDepth_WorksWithMirrors(rawDepth, uv ) * perspectiveFactor;
return eyeDepthWorld * mul(unity_CameraInvProjection, float4(uv * 2.0 - 1.0, 1.0, 1.0)).xyz;
} else {
// NOTE: this is the inverse of TransformStereoScreenSpaceTex, would be nicer if we just had the UV from before that instead.
// moved into the if statement because I suspect GetLinearZFromZDepth is already doing something similar. (or, at least, is having less issues?)
#ifdef UNITY_SINGLE_PASS_STEREO
// Transform screen coord to current VR eye coord
float4 scaleOffset = unity_StereoScaleOffset[unity_StereoEyeIndex];
uv = (uv - scaleOffset.zw) / scaleOffset.xy;
#endif
float3 viewSpaceRay = mul(unity_CameraInvProjection, float4(uv * 2.0 - 1.0, 1.0, 1.0) * _ProjectionParams.z).xyz;
return viewSpaceRay * Linear01Depth(rawDepth);
}
}
float3 viewSpacePosAtPixelPosition(float2 vpos)
{
float2 uv = vpos * _CameraDepthTexture_TexelSize.xy;
return viewSpacePosAtScreenUV(uv);
}
// based on Yuwen Wu's Accurate Normal Reconstruction
// https://atyuwen.github.io/posts/normal-reconstruction/
// basically as accurate as you can get!
// no artifacts on depth disparities
// no artifacts on edges
// artifacts on triangles that are <3 pixels across
// unity's compiled fragment shader stats: 66 math, 9 tex
half3 viewNormalAtPixelPosition(float2 vpos)
{
// screen uv from vpos
float2 uv = vpos * _CameraDepthTexture_TexelSize.xy;
// current pixel's depth
float c = getRawDepth(uv);
// get current pixel's view space position
half3 viewSpacePos_c = viewSpacePosAtScreenUV(uv);
// get view space position at 1 pixel offsets in each major direction
half3 viewSpacePos_l = viewSpacePosAtScreenUV(uv + float2(-1.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_r = viewSpacePosAtScreenUV(uv + float2( 1.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_d = viewSpacePosAtScreenUV(uv + float2( 0.0,-1.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_u = viewSpacePosAtScreenUV(uv + float2( 0.0, 1.0) * _CameraDepthTexture_TexelSize.xy);
// get the difference between the current and each offset position
half3 l = viewSpacePos_c - viewSpacePos_l;
half3 r = viewSpacePos_r - viewSpacePos_c;
half3 d = viewSpacePos_c - viewSpacePos_d;
half3 u = viewSpacePos_u - viewSpacePos_c;
// get depth values at 1 & 2 pixels offsets from current along the horizontal axis
half4 H = half4(
getRawDepth(uv + float2(-1.0, 0.0) * _CameraDepthTexture_TexelSize.xy),
getRawDepth(uv + float2( 1.0, 0.0) * _CameraDepthTexture_TexelSize.xy),
getRawDepth(uv + float2(-2.0, 0.0) * _CameraDepthTexture_TexelSize.xy),
getRawDepth(uv + float2( 2.0, 0.0) * _CameraDepthTexture_TexelSize.xy)
);
// get depth values at 1 & 2 pixels offsets from current along the vertical axis
half4 V = half4(
getRawDepth(uv + float2(0.0,-1.0) * _CameraDepthTexture_TexelSize.xy),
getRawDepth(uv + float2(0.0, 1.0) * _CameraDepthTexture_TexelSize.xy),
getRawDepth(uv + float2(0.0,-2.0) * _CameraDepthTexture_TexelSize.xy),
getRawDepth(uv + float2(0.0, 2.0) * _CameraDepthTexture_TexelSize.xy)
);
// current pixel's depth difference from slope of offset depth samples
// differs from original article because we're using non-linear depth values
// see article's comments
half2 he = abs((2 * H.xy - H.zw) - c);
half2 ve = abs((2 * V.xy - V.zw) - c);
// pick horizontal and vertical diff with the smallest depth difference from slopes
half3 hDeriv = he.x < he.y ? l : r;
half3 vDeriv = ve.x < ve.y ? d : u;
// get view space normal from the cross product of the best derivatives
half3 viewNormal = normalize(cross(hDeriv, vDeriv));
return viewNormal;
}
struct FragData {
float4 pos;
float3 worldPos;
float3 normal;
SHADOW_COORDS(1)
};
UnityLight CreateLight(FragData i) {
UnityLight light;
#if defined(POINT) || defined(POINT_COOKIE) || defined(SPOT)
light.dir = normalize(_WorldSpaceLightPos0.xyz - i.worldPos);
#else
light.dir = _WorldSpaceLightPos0.xyz;
#endif
UNITY_LIGHT_ATTENUATION(attenuation, i, i.worldPos);
light.color = _LightColor0.rgb * attenuation;
light.ndotl = DotClamped(i.normal, light.dir);
return light;
}
float3 BoxProjection (
float3 direction, float3 position,
float4 cubemapPosition, float3 boxMin, float3 boxMax
) {
#if UNITY_SPECCUBE_BOX_PROJECTION
UNITY_BRANCH
if (cubemapPosition.w > 0) {
float3 factors = ((direction > 0 ? boxMax : boxMin) - position) / direction;
float scalar = min(min(factors.x, factors.y), factors.z);
direction = direction * scalar + (position - cubemapPosition);
}
#endif
return direction;
}
UnityIndirect CreateIndirectLight (FragData i, float3 viewDir) {
UnityIndirect indirectLight;
indirectLight.diffuse = 0;
indirectLight.specular = 0;
// #if defined(VERTEXLIGHT_ON)
// indirectLight.diffuse = i.vertexLightColor;
// #endif
#if defined(FORWARD_BASE_PASS)
indirectLight.diffuse += max(0, ShadeSH9(float4(i.normal, 1)));
float3 reflectionDir = reflect(-viewDir, i.normal);
Unity_GlossyEnvironmentData envData;
envData.roughness = 1 - _Smoothness;
envData.reflUVW = BoxProjection(
reflectionDir, i.worldPos,
unity_SpecCube0_ProbePosition,
unity_SpecCube0_BoxMin, unity_SpecCube0_BoxMax
);
float3 probe0 = Unity_GlossyEnvironment(
UNITY_PASS_TEXCUBE(unity_SpecCube0), unity_SpecCube0_HDR, envData
);
#if UNITY_SPECCUBE_BLENDING
float interpolator = unity_SpecCube0_BoxMin.w;
UNITY_BRANCH
if (interpolator < 0.99999) {
envData.reflUVW = BoxProjection(
reflectionDir, i.worldPos,
unity_SpecCube1_ProbePosition,
unity_SpecCube1_BoxMin, unity_SpecCube1_BoxMax
);
float3 probe1 = Unity_GlossyEnvironment(
UNITY_PASS_TEXCUBE_SAMPLER(unity_SpecCube1, unity_SpecCube0),
unity_SpecCube1_HDR, envData
);
indirectLight.specular = lerp(probe1, probe0, interpolator);
}
#else
indirectLight.specular = probe0;
#endif
#endif
return indirectLight;
}
float3 GetEmission () {
#if defined(FORWARD_BASE_PASS)
// #if defined(_EMISSION_MAP)
// return tex2D(_EmissionMap, i.uv.xy) * _Emission;
// #else
return _Emission;
// #endif
#else
return 0;
#endif
}
float4 frag (v2f i) : SV_Target
{
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i);
// wow, i'm basically computing world space position from the depth texture *twice*! I should probably fix that at some point.
// from shadertrixx
// sample the texture
float3 fullVectorFromEyeToGeometry = i.worldPos - _WorldSpaceCameraPos;
worldSpaceDirection = normalize( i.worldPos - _WorldSpaceCameraPos );
// Compute projective scaling factor.
// perspectiveFactor is 1.0 for the center of the screen, and goes above 1.0 toward the edges,
// as the frustum extent is further away than if the zfar in the center of the screen
// went to the edges.
float perspectiveDivide = 1.0f / i.vertex.w;
perspectiveFactor = length( fullVectorFromEyeToGeometry * perspectiveDivide );
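// (SV_Position's w is the view-space depth here, so perspectiveFactor is the ratio of
// distance-along-the-ray to depth; multiplying a linear eye depth by it gives ray distance.)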
// Calculate our UV within the screen (for reading depth buffer)
float2 screenUV = i.screenPosition.xy * perspectiveDivide;
float rawDepth = SAMPLE_DEPTH_TEXTURE( _CameraDepthTexture, screenUV);
float eyeDepthWorld =
GetLinearZFromZDepth_WorksWithMirrors(rawDepth, screenUV ) * perspectiveFactor;
float4 clipPos = i.clipPos / i.clipPos.w;
clipPos.z = SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, ComputeScreenPos(clipPos));
float4 homWorldPos = mul(i.inverseVP, clipPos);
float3 wpos = homWorldPos.xyz / homWorldPos.w; // world space fragment position
//float3 wpos = _WorldSpaceCameraPos + eyeDepthWorld * worldSpaceDirection;
float3 opos = mul (unity_WorldToObject, float4(wpos,1)).xyz;
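// Discard fragments whose reconstructed position falls outside the decal volume:
// clip() kills the pixel if any |opos| component exceeds 0.5, i.e. outside a unit cube
// (so the decal mesh is presumably a default Unity cube, scaled to taste).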
clip(0.5 - abs(opos));
uint normal_width, normal_height, normal_elems;
normal_elems = 0;
// It is possible for the DepthNormals texture to be computed for the MainCamera, but not for Mirror(s)
// It is also possible for a material to render into the Depth Texture, but not the DepthNormals texture.
// With this in mind, we have a few tests to try to perform, and fall back to normal reconstruction from
// the depth texture.
float3 viewNormal;
bool reconstruct_normals = false;
// TEST 1: Are the depth texture and the depthnormal texture the same size?
// As far as I know, in all cases where the DepthNormal texture is correct, this will be true.
// The motivating example is for mirrors: if the DepthNormal pre-pass is not running for the mirror,
// then we will be re-using the texture computed in the main pass, which is incorrect.
// In this case, if the mirror has a resolution limit set, then the depth texture resolution will be
// a different resolution from the (main camera) DepthNormals texture resolution.
// Unfortunately, there are still cases where the mirror textures match the main camera textures in size,
// so we can't solely rely on it. Nevertheless, this should be a cheap test.
bool resolutionTest = all(_CameraDepthTexture_TexelSize == _CameraDepthNormalsTexture_TexelSize);
// completely uniform branch flow. Also have the option to skip the attempt to use DepthNormals entirely.
UNITY_BRANCH
if (resolutionTest && _UseDepthNormals) {
float4 depthNormal = SAMPLE_RAW_DEPTH_TEXTURE_LOD(_CameraDepthNormalsTexture, float4(screenUV, 0, 0));
float decodedDepth;
DecodeDepthNormal(depthNormal, decodedDepth, viewNormal);
// TEST 2: is the depth from the Depth texture and the DepthNormal texture approximately equal?
// Motivating example: in the case an object is rendered to the depth texture, but not the DepthNormals texture,
// the depth between the two *should* be very different, and we should fall back to reconstructing the normals.
// Should be a similar story for incorrect DepthNormals in a mirror, barring some artifacts that are
// likely to occur from relying on this test in that situation; I don't have any better ideas, unfortunately.
// The amount of error seemingly inherent to this process is a little concerning, i.e. I have error bars of 10cm.
// Nevertheless, this should work.
//return float4((eyeDepthWorld * _ProjectionParams.w).rrr, 1);
float error = abs(eyeDepthWorld - (decodedDepth * perspectiveFactor * _ProjectionParams.z));
//return error;
if (error >= 0.1) {
reconstruct_normals = true;
}
} else {
reconstruct_normals = true;
}
//return reconstruct_normals;
// I'm betting that this control flow is "uniform enough" and the relevant item "expensive enough" that it's worth forcing a branch here.
UNITY_BRANCH
if (reconstruct_normals || !_UseDepthNormals) {
viewNormal = viewNormalAtPixelPosition(i.vertex.xy);
}
//return float4(viewNormal, 1);
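// rotate the view-space normal into world space using the rotation part of the inverse view matrix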
float3 WorldNormal = mul((float3x3)unity_MatrixInvV, viewNormal);
//return float4(WorldNormal, 1);
// start lighting, courtesy of catlikecoding
float3 viewDir = normalize(_WorldSpaceCameraPos - wpos);
float3 albedo = _Tint.rgb;
float3 specularTint;
float oneMinusReflectivity;
albedo = DiffuseAndSpecularFromMetallic(
albedo, _Metallic, specularTint, oneMinusReflectivity
);
FragData data;
data.normal = WorldNormal;
data.worldPos = wpos;
data.pos = mul(UNITY_MATRIX_VP, float4(wpos, 1.0));
// TRANSFER_SHADOW is supposed to be done in the vertex shader,
// where it can assume access to the object-space vertex position.
// Due to the nature of this scenario we can't do that, so fill in the shadow coords manually:
#if defined(SHADOWS_SCREEN)
#if defined(UNITY_NO_SCREENSPACE_SHADOWS)
data._ShadowCoord = mul(unity_WorldToShadow[0], wpos);
#else
data._ShadowCoord = ComputeScreenPos(clipPos);
#endif
//data._ShadowCoords = i._ShadowCoords;
#elif defined(SHADOWS_DEPTH)
data._ShadowCoord = mul(unity_WorldToShadow[0], wpos);
#elif defined(SHADOWS_CUBE)
data._ShadowCoord = wpos - _LightPositionRange.xyz;
#endif
// struct {
// float3 vertex;
// } v;
// v.vertex = wpos;
// TRANSFER_SHADOW(data);
//return float4(depthNormal.ba, 0, 1);
//return float4(ShadeSH9(float4(data.normal, 1)), 1);
//return half4(GammaToLinearSpace(WorldNormal.xyz * 0.5 + 0.5), 1.0);
float4 color = UNITY_BRDF_PBS(
albedo, specularTint,
oneMinusReflectivity, _Smoothness,
WorldNormal, viewDir,
CreateLight(data), CreateIndirectLight(data, viewDir)
);
color.rgb += GetEmission();
//color.a = _Tint.a;
return color;
}

MirrorDepthNormal.cs

using UdonSharp;
using UnityEngine;
using VRC.SDKBase;
using VRC.SDK3.Components;
using VRC.Udon;
[UdonBehaviourSyncMode(BehaviourSyncMode.None)]
public class MirrorDepthNormal : UdonSharpBehaviour
{
public string MirrorCamPath = null;
public GameObject MirrorCamObj = null;
// TODO: have a central manager script to register cameras to and toggle DepthNormals from.
void Start() {
if (MirrorCamPath == null || MirrorCamPath == "") {
if (!gameObject.GetComponent<VRCMirrorReflection>()) {
Debug.Log("[MirrorDepthNormal] Warning: Object '" + gameObject.name + "' is not a mirror and doesn't have a specific (mirror) camera path set. This may be incorrect!");
}
MirrorCamPath = "/MirrorCam" + gameObject.name;
}
}
void Update()
{
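// Poll each frame: VRChat creates the MirrorCam object lazily, so it may not
// exist yet when this behaviour starts (e.g. before the mirror is first enabled).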
MirrorCamObj = GameObject.Find(MirrorCamPath);
if (!MirrorCamObj) {
return;
}
Camera mirrorCam = MirrorCamObj.GetComponent<Camera>();
mirrorCam.depthTextureMode = (DepthTextureMode)((int)DepthTextureMode.DepthNormals | (int)mirrorCam.depthTextureMode);
Debug.Log("Enabled DepthNormals on '" + MirrorCamPath + "', disabling. Sayonara!");
//gameObject.SetActive(false);
enabled = false;
}
}

ScreenspaceDecal.shader

Shader "ScreenspaceDecal"
{
Properties
{
_Tint ("Tint", Color) = (1, 1, 1, 1)
[Gamma] _Metallic ("Metallic", Range(0, 1)) = 0
_Smoothness ("Smoothness", Range(0, 1)) = 0.1
_Emission ("Emission", Color) = (0, 0, 0, 1)
[ToggleUI] _UseDepthNormals ("Use Depth Normals", Float) = 1.0
}
SubShader
{
Tags { "RenderType" = "Transparent" "ForceNoShadowCasting"="True" "DisableBatching" = "True" }
LOD 100
Pass
{
Tags {
"LightMode" = "ForwardBase"
}
//Blend SrcAlpha OneMinusSrcAlpha
ZWrite Off
ZTest Off
Cull Front
CGPROGRAM
#pragma target 5.0
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#pragma multi_compile _ SHADOWS_SCREEN
#pragma multi_compile_instancing
#define FORWARD_BASE_PASS
#include "lighting.cginc"
ENDCG
}
Pass
{
Tags {
"LightMode" = "ForwardAdd"
}
Blend One One
//Blend SrcAlpha One
ZWrite Off
ZTest Off
Cull Front
CGPROGRAM
#pragma target 5.0
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fwdadd_fullshadows
#pragma multi_compile_fog
#pragma multi_compile_instancing
#include "lighting.cginc"
ENDCG
}
}
}

VisualizeDepthNormals.shader

Shader "Unlit/VisualizeDepthNormals"
{
Properties
{
//_MainTex ("Texture", 2D) = "white" {}
[ToggleUI] _WorldDepthNormals ("World Depth Normals", Float) = 1
[ToggleUI] _DepthNormalDepth("Depth Normal Depth", Float) = 0
[ToggleUI] _Depth("Depth", Float) = 0
}
SubShader
{
Tags { "RenderType"="Transparent" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#pragma multi_compile_instancing
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
float3 worldPos: POSITIONT;
float2 screenPosition: TEXCOORD2; // TEXCOORD1 is taken by UNITY_FOG_COORDS(1)
nointerpolation float4x4 inverseVP : IVP;
nointerpolation float4x4 inverseV : IV;
nointerpolation float4x4 inverseP : IP;
UNITY_VERTEX_OUTPUT_STEREO
};
UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthNormalsTexture);
UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);
uniform float _VRChatMirrorMode;
bool isMirror() { return _VRChatMirrorMode != 0; }
float4x4 inverse(float4x4 mat)
{
float4x4 M = transpose(mat);
float m01xy = M[0].x * M[1].y - M[0].y * M[1].x;
float m01xz = M[0].x * M[1].z - M[0].z * M[1].x;
float m01xw = M[0].x * M[1].w - M[0].w * M[1].x;
float m01yz = M[0].y * M[1].z - M[0].z * M[1].y;
float m01yw = M[0].y * M[1].w - M[0].w * M[1].y;
float m01zw = M[0].z * M[1].w - M[0].w * M[1].z;
float m23xy = M[2].x * M[3].y - M[2].y * M[3].x;
float m23xz = M[2].x * M[3].z - M[2].z * M[3].x;
float m23xw = M[2].x * M[3].w - M[2].w * M[3].x;
float m23yz = M[2].y * M[3].z - M[2].z * M[3].y;
float m23yw = M[2].y * M[3].w - M[2].w * M[3].y;
float m23zw = M[2].z * M[3].w - M[2].w * M[3].z;
float4 adjM0, adjM1, adjM2, adjM3;
adjM0.x =+ dot(M[1].yzw, float3(m23zw, - m23yw, m23yz));
adjM0.y =- dot(M[0].yzw, float3(m23zw, - m23yw, m23yz));
adjM0.z =+ dot(M[3].yzw, float3(m01zw, - m01yw, m01yz));
adjM0.w =- dot(M[2].yzw, float3(m01zw, - m01yw, m01yz));
adjM1.x =- dot(M[1].xzw, float3(m23zw, - m23xw, m23xz));
adjM1.y =+ dot(M[0].xzw, float3(m23zw, - m23xw, m23xz));
adjM1.z =- dot(M[3].xzw, float3(m01zw, - m01xw, m01xz));
adjM1.w =+ dot(M[2].xzw, float3(m01zw, - m01xw, m01xz));
adjM2.x =+ dot(M[1].xyw, float3(m23yw, - m23xw, m23xy));
adjM2.y =- dot(M[0].xyw, float3(m23yw, - m23xw, m23xy));
adjM2.z =+ dot(M[3].xyw, float3(m01yw, - m01xw, m01xy));
adjM2.w =- dot(M[2].xyw, float3(m01yw, - m01xw, m01xy));
adjM3.x =- dot(M[1].xyz, float3(m23yz, - m23xz, m23xy));
adjM3.y =+ dot(M[0].xyz, float3(m23yz, - m23xz, m23xy));
adjM3.z =- dot(M[3].xyz, float3(m01yz, - m01xz, m01xy));
adjM3.w =+ dot(M[2].xyz, float3(m01yz, - m01xz, m01xy));
float invDet = rcp(dot(M[0].xyzw, float4(adjM0.x, adjM1.x, adjM2.x, adjM3.x)));
return transpose(float4x4(adjM0 * invDet, adjM1 * invDet, adjM2 * invDet, adjM3 * invDet));
}
v2f vert (appdata v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_OUTPUT(v2f, o);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv; //TRANSFORM_TEX(v.uv, _MainTex);
o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
// Save the clip space position so we can use it later.
// This also handles situations where the Y is flipped.
float2 suv = o.vertex.xy * float2( 0.5, 0.5*_ProjectionParams.x);
// Tricky, constants like the 0.5 and the second parameter
// need to be premultiplied by o.vertex.w.
o.screenPosition = TransformStereoScreenSpaceTex( suv+0.5*o.vertex.w, o.vertex.w );
// if (isMirror()) {
// o.uv.x = 1-o.uv.x;
// }
o.inverseVP = inverse(UNITY_MATRIX_VP);
o.inverseV = inverse(UNITY_MATRIX_V);
o.inverseP = inverse(UNITY_MATRIX_P);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
// from https://github.com/cnlohr/shadertrixx
// Inspired by the Internal-ScreenSpaceShadows implementation. This was adapted by lyuma.
// This code can be found on google if you search for "computeCameraSpacePosFromDepthAndInvProjMat"
// Note: The output of this will still need to be adjusted. It is NOT in world space units.
float GetLinearZFromZDepth_WorksWithMirrors(float zDepthFromMap, float2 screenUV)
{
#if defined(UNITY_REVERSED_Z)
zDepthFromMap = 1 - zDepthFromMap;
// When using a mirror, the far plane is whack. This just checks for it and aborts.
if( zDepthFromMap >= 1.0 ) return _ProjectionParams.z;
#endif
float4 clipPos = float4(screenUV.xy, zDepthFromMap, 1.0);
clipPos.xyz = 2.0f * clipPos.xyz - 1.0f;
float4 camPos = mul(unity_CameraInvProjection, clipPos);
return -camPos.z / camPos.w;
}
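// Inverse of UnityCG's Linear01Depth(z) = 1 / (_ZBufferParams.x * z + _ZBufferParams.y):
// takes a linear [0,1] depth and recovers the raw (non-linear) z-buffer value.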
inline float InverseLinear01Depth(float linearDepth)
{
return (1.0 - (linearDepth * _ZBufferParams.y)) / (linearDepth * _ZBufferParams.x);
}
// inline float LinearEyeDepth( float z ) {
// return 1.0 / (_ZBufferParams.z * z + _ZBufferParams.w);
// }
// inline float CorrectedLinearEyeDepth(float z, float B)
// {
// // UNITY_MATRIX_P._34 == 1/_ZBufferParams.z == far / (1 - far/near)
// // B == _ZBufferParams.w ==
// return 1.0 / (z/UNITY_MATRIX_P._34 + B);
// }
float _DepthNormalDepth;
float _WorldDepthNormals;
float _Depth;
fixed4 frag (v2f i) : SV_Target
{
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i);
// sample the texture
float3 fullVectorFromEyeToGeometry = i.worldPos - _WorldSpaceCameraPos;
float3 worldSpaceDirection = normalize( i.worldPos - _WorldSpaceCameraPos );
// Compute projective scaling factor.
// perspectiveFactor is 1.0 for the center of the screen, and goes above 1.0 toward the edges,
// as the frustum extent is further away than if the zfar in the center of the screen
// went to the edges.
float perspectiveDivide = 1.0f / i.vertex.w;
float perspectiveFactor = length( fullVectorFromEyeToGeometry * perspectiveDivide );
// Calculate our UV within the screen (for reading depth buffer)
float2 screenUV = i.screenPosition.xy * perspectiveDivide;
float rawDepth = SAMPLE_DEPTH_TEXTURE( _CameraDepthTexture, screenUV);
float eyeDepthWorld =
GetLinearZFromZDepth_WorksWithMirrors(rawDepth, screenUV ) * perspectiveFactor;
float3 worldPosEyeHitInDepthTexture = _WorldSpaceCameraPos + eyeDepthWorld * worldSpaceDirection;
float4 rawDepthNormal = SAMPLE_RAW_DEPTH_TEXTURE(_CameraDepthNormalsTexture, screenUV);
float3 viewNormal;
float depth;
DecodeDepthNormal(rawDepthNormal, depth, viewNormal);
float4 depthClipPosition = float4(screenUV, 0.5, 1.0);
float3 depthViewDir = mul(unity_CameraInvProjection, depthClipPosition).xyz;
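// the depth decoded from _CameraDepthNormalsTexture is packed into two 8-bit channels
// (16 bits total), hence "imprecise" compared to sampling _CameraDepthTexture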
float impreciseDepth = depth * perspectiveFactor * _ProjectionParams.z;
float3 WorldNormal = mul((float3x3)unity_MatrixInvV, viewNormal);
float lengthBeforeScreen = length(fullVectorFromEyeToGeometry);
//return half4(GammaToLinearSpace(WorldNormal.xyz * 0.5 + 0.5), 1.0);
float3 col = 0;
if (_WorldDepthNormals) {
col = GammaToLinearSpace(WorldNormal.xyz * 0.5 + 0.5);
} else if (_DepthNormalDepth) {
col = GammaToLinearSpace((impreciseDepth - lengthBeforeScreen) / 10);
} else if (_Depth) {
col = GammaToLinearSpace((eyeDepthWorld - lengthBeforeScreen) / 10);
}
//float3 col = worldPosEyeHitInDepthTexture - worldPosition;
//float3 col = worldPosEyeHitInDepthTexture;
//float3 col = worldPosition;
//float3 col = worldPosEyeHitInDepthTexture;
//float3 col = eyeDepthWorld * _ProjectionParams.w;
// both of these only work in the mirror
//float3 col = abs(eyeDepthWorld - (depth * perspectiveFactor * _ProjectionParams.z));
//float3 col = rawDepth;
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return fixed4(col, 1);
}
ENDCG
}
}
}