#include "../shaderenv.h" #include "common.h" struct fragment { // normalized screen position float4 pos: WPOS; float2 texCoord: TEXCOORD0; float3 view: TEXCOORD1; }; struct pixel { float4 color: COLOR0; float3 normal: COLOR1; //float3 diffVal: COLOR2; }; /** function for standard deferred shading */ float4 shade(fragment IN, uniform float4 color, uniform float3 normal, float3 lightDir, float4 ao) { // diffuse intensity const float angle = saturate(dot(normal, lightDir)); float4 lightDiffuse = glstate.light[0].diffuse; float4 diffuse = angle * lightDiffuse; // global ambient const float4 ambient = glstate.light[0].ambient; float4 outColor; // hack: prevent to shade the sky if (color.w > DEPTH_THRESHOLD) { outColor = color; } else { outColor = (ambient * ao + diffuse) * color; //outColor = ambient + diffuse * color; } return outColor; } /** The mrt shader for standard rendering */ pixel main(fragment IN, uniform sampler2D colors, uniform sampler2D normals, uniform float3 lightDir, uniform sampler2D aoTex ) { pixel OUT; float4 norm = tex2D(normals, IN.texCoord); float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0)); float4 ao = tex2Dlod(aoTex, float4(IN.texCoord, 0, 0)); float3 normal = normalize(norm.xyz); float4 col = shade(IN, color, normal, lightDir, ao); OUT.color = col; // store scaled view vector so wie don't have to normalize for later //OUT.color.w = color.w / length(IN.view); OUT.color.w = color.w; return OUT; } float CalcShadowTerm(fragment IN, uniform sampler2D shadowMap, uniform float scale, uniform float2 lightSpacePos, uniform float depth, uniform float2 samples[NUM_PCF_TABS], uniform float weights[NUM_PCF_TABS], uniform sampler2D noiseTexture ) { //float shadowDepth = tex2D(shadowMap, lightSpacePos).x; //return step(depth, shadowDepth); float total_d = .0f; float total_w = .0f; for (int i = 0; i < NUM_PCF_TABS; ++ i) { float2 offset; const float w = weights[i]; #if 1 //////////////////// //-- add random noise: reflect around random normal vector (warning: slow!) 
float CalcShadowTerm(fragment IN,
                     uniform sampler2D shadowMap,
                     uniform float scale,
                     float2 lightSpacePos,
                     float depth,
                     uniform float2 samples[NUM_PCF_TABS],
                     uniform float weights[NUM_PCF_TABS],
                     uniform sampler2D noiseTexture
                     )
{
    //float shadowDepth = tex2D(shadowMap, lightSpacePos).x;
    //return step(depth, shadowDepth);

    float total_d = .0f;
    float total_w = .0f;

    for (int i = 0; i < NUM_PCF_TABS; ++ i)
    {
        float2 offset;
        const float w = weights[i];

#if 1
        ////////////////////
        //-- add random noise: rotate the sample by a random angle (warning: slow!)

        float2 mynoise = tex2D(noiseTexture, IN.texCoord * 4.0f).xy;
        //offset = myreflect(samples[i], mynoise);
        offset = myrotate(samples[i], mynoise.x);
#else
        offset = samples[i];
#endif

        // weight with projected coordinate to reach similar kernel size for near and far
        float2 texcoord = lightSpacePos + offset * scale;

        float shadowDepth = tex2D(shadowMap, texcoord).x;

        total_d += w * step(depth, shadowDepth);
        total_w += w;
    }

    total_d /= total_w;

    return total_d;
}
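
/** Deferred shading pass with shadow mapping: the world space position is
    reconstructed from the eye space depth stored in the color buffer's w
    channel (worldPos = eyePos - normalize(view) * depth), transformed into
    light space, and the diffuse term is attenuated by the PCF shadow term
    computed above.
*/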
pixel main_shadow(fragment IN,
                  uniform sampler2D colors,
                  uniform sampler2D positions,
                  uniform sampler2D normals,
                  uniform sampler2D shadowMap,
                  uniform float4x4 shadowMatrix,
                  uniform float sampleWidth,
                  uniform sampler2D noiseTex,
                  uniform float2 samples[NUM_PCF_TABS],
                  uniform float weights[NUM_PCF_TABS],
                  uniform float3 lightDir,
                  uniform float3 eyePos,
                  uniform float3 bl,
                  uniform float3 br,
                  uniform float3 tl,
                  uniform float3 tr
                  )
{
    pixel OUT;

    const float3 normal = tex2D(normals, IN.texCoord.xy).xyz;
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));

    /// reconstruct position from the eye space depth
    float3 viewDir = IN.view;
    const float lenView = length(viewDir);
    viewDir /= lenView;

    // color.w holds the eye space depth (avoids a second texture fetch)
    const float eyeDepth = color.w;
    const float4 worldPos = float4(eyePos - viewDir * eyeDepth, 1);

    // diffuse intensity
    const float angle = saturate(dot(normal, lightDir));
    const float4 lightDiffuse = glstate.light[0].diffuse;

    float4 diffuse = lightDiffuse * angle;

    // hack: prevent shadowing the sky
    const bool useShading = (color.w < 1e19f);

    // calc diffuse illumination + shadow term
    if (useShading &&
        (angle > 1e-3f) // shadow only if the diffuse term has some minimum intensity
        )
    {
        float4 lightSpacePos = mul(shadowMatrix, worldPos);
        lightSpacePos /= lightSpacePos.w;

        float shadowTerm = CalcShadowTerm(IN, shadowMap, sampleWidth,
                                          lightSpacePos.xy, lightSpacePos.z,
                                          samples, weights, noiseTex);

        diffuse *= shadowTerm;
    }

    // light ambient term
    const float4 ambient = glstate.light[0].ambient;

    // compute shading
    OUT.color = useShading ? (ambient + diffuse) * color : color;

    // variant storing the view-scaled depth so we don't have to normalize later (e.g., for ssao):
    //OUT.color.w = color.w / lenView;
    OUT.color.w = color.w;

    return OUT;
}


float4 Output(fragment IN, uniform sampler2D colors): COLOR
{
    return tex2Dlod(colors, float4(IN.texCoord, 0, 0));
}


float4 ScaleDepth(fragment IN, uniform sampler2D colors): COLOR
{
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));
    // store the view-scaled depth so we don't have to normalize later (e.g., for ssao)
    color.w /= length(IN.view);

    return color;
}
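
//////////////////////////////////////////////////////////////////////////
//-- temporal reprojection: the eye-relative position is rebuilt from the
//-- stored eye space depth, projected with the previous frame's matrices,
//-- and the clip space result is mapped from the [-1, 1] unit cube into
//-- [0, 1] texture space (oldTexCoords = ndc.xy * 0.5 + 0.5). Comparing the
//-- depth stored in the old frame with the reprojected depth classifies each
//-- pixel as valid, only tentatively valid (dynamic objects), or invalid
//-- (off-screen or disoccluded samples).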
/** This shader computes the reprojection and checks if the reprojected pixel
    from the last frame is still valid in the current frame.
*/
inline float2 PixelValid(sampler2D oldTex,
                         float4 color,
                         float3 difVec,
                         float2 texCoord,
                         float3 viewDir,
                         float3 oldEyePos,
                         float4x4 modelViewProj,
                         float4x4 oldModelViewProj,
                         float3 oldbl,
                         float3 oldbr,
                         float3 oldtl,
                         float3 oldtr,
                         sampler2D myTex
                         )
{
    // reconstruct position from the eye space depth
    const float eyeSpaceDepth = color.w;
    const float4 worldPos = float4(-viewDir * eyeSpaceDepth, 1.0f);

    ////////////////
    //-- calculate the current projected position (also used for next frame)

    float4 projPos = mul(modelViewProj, worldPos);
    const float invw = 1.0f / projPos.w;
    projPos *= invw;

    // compute position from old frame for dynamic objects + translational portion
    //const float3 translatedPos = difVec - oldEyePos + worldPos.xyz;
    // don't use difVec here: we want to detect if the actual pixel has changed => ssao changed
    const float3 translatedPos = -oldEyePos + worldPos.xyz;

    /////////////////
    //-- reproject into old frame and calculate texture position of sample in old frame

    // note: the old model view matrix only holds the view orientation part
    float4 backProjPos = mul(oldModelViewProj, float4(translatedPos, 1.0f));
    backProjPos /= backProjPos.w;

    // fit from unit cube into 0 .. 1
    const float2 oldTexCoords = backProjPos.xy * .5f + .5f;

    // retrieve the sample from the last frame
    const float4 oldPixel = tex2Dlod(oldTex, float4(oldTexCoords, .0f, .0f));
    const float oldDiff = tex2Dlod(myTex, float4(oldTexCoords, .0f, .0f)).x;

    // calculate eye space position of sample in old frame
    const float oldEyeSpaceDepth = oldPixel.w;

    // vector from eye pos to old sample
    const float3 oldViewDir = Interpol(oldTexCoords, oldbl, oldbr, oldtl, oldtr);
    const float invLen = 1.0f / length(oldViewDir);
    const float projectedEyeSpaceDepth = invLen * length(translatedPos);

    const float depthDif = abs(1.0f - oldEyeSpaceDepth / projectedEyeSpaceDepth);

    const float squaredLen = SqrLen(difVec);

    // test if this pixel was valid in the old frame
    float isPixelValid;

    // check if the pixel belonged to a dynamic object in the last frame
    const bool newDynamic = (squaredLen > DYNAMIC_OBJECTS_THRESHOLD);
    const bool oldDynamic = (oldDiff > DYNAMIC_OBJECTS_THRESHOLD);

    // 0 means the pixel is valid
    const float pixelIsValid = .0f;
    // means that we only use slight temporal coherence over some frames
    // so that there is no noticeable drag
    const float pixelCouldBeValid = 2.0f;
    // this pixel information has to be discarded in order to not create artifacts
    const float pixelIsNotValid = 100.0f;

    // check if the pixel was outside of the frame buffer
    if ((oldTexCoords.x <= .0f) || (oldTexCoords.x >= 1.0f) ||
        (oldTexCoords.y <= .0f) || (oldTexCoords.y >= 1.0f)
        )
    {
        isPixelValid = pixelIsNotValid;
    }
    else if (// check if the pixel changed from dynamic to static or vice versa
             ((oldDynamic && !newDynamic) ||
              (!oldDynamic && newDynamic) ||
              ((oldEyeSpaceDepth < DEPTH_THRESHOLD) &&
               (projectedEyeSpaceDepth < DEPTH_THRESHOLD) &&
               (oldDynamic || newDynamic) &&  // check if we have a dynamic object
               (depthDif > MIN_DEPTH_DIFF)))) // and there is a depth discontinuity
    {
        isPixelValid = pixelCouldBeValid;
    }
    else
    {
        isPixelValid = pixelIsValid;
    }

    return float2(isPixelValid, abs(oldEyeSpaceDepth - projectedEyeSpaceDepth));
}


/** This function is called during downsampling of the buffers for ssao.
*/
pixel PrepareSsao(fragment IN,
                  uniform sampler2D colorsTex,
                  uniform sampler2D normalsTex,
                  uniform sampler2D diffVals,
                  uniform sampler2D oldTex,
                  uniform float4x4 modelViewProj,
                  uniform float4x4 oldModelViewProj,
                  uniform float3 oldbl,
                  uniform float3 oldbr,
                  uniform float3 oldtl,
                  uniform float3 oldtr,
                  uniform float3 oldEyePos,
                  uniform sampler2D myTex
                  )
{
    pixel pix;

    float4 color = tex2Dlod(colorsTex, float4(IN.texCoord, .0f, .0f));
    // store the view-scaled depth so we don't have to normalize later (e.g., for ssao)
    color.w /= length(IN.view);

    const float4 difVec = tex2Dlod(diffVals, float4(IN.texCoord, 0, 0));

    // normalize normal once more because of bilinear interpolation
    const float3 normal = normalize(tex2Dlod(normalsTex, float4(IN.texCoord, 0, 0)).xyz);

//#ifdef PERFORMANCE_TEST
#if 1
    // do reprojection and filter out the pixels that are not safe
    const float2 pValid = PixelValid(oldTex, color, difVec.xyz,
                                     IN.texCoord, IN.view,
                                     oldEyePos,
                                     modelViewProj,
                                     oldModelViewProj,
                                     oldbl, oldbr, oldtl, oldtr,
                                     myTex);
#else
    const float2 pValid = float2(0, 0);
#endif

    pix.color = color;
    pix.color.xy = pValid.xy;
    pix.color.z = color.w;

    pix.normal = normal;

    return pix;
}


float4 DownSample(fragment IN,
                  uniform sampler2D colors,
                  uniform float2 downSampleOffs[NUM_DOWNSAMPLES]): COLOR
{
    // let bilinear filtering do its work
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));
    return color;
}
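
/*  Reference note: the helpers used above are expected to come from common.h,
    and the constants NUM_PCF_TABS, NUM_DOWNSAMPLES, DEPTH_THRESHOLD,
    DYNAMIC_OBJECTS_THRESHOLD and MIN_DEPTH_DIFF from shaderenv.h. The sketches
    below (kept inside this comment so they cannot clash with the headers) only
    document the behavior the calls above rely on; they are assumptions, not
    the project's actual implementations.

    float3 Interpol(float2 w, float3 bl, float3 br, float3 tl, float3 tr)
    {
        // bilinear interpolation between the four frustum corner rays
        return lerp(lerp(bl, br, w.x), lerp(tl, tr, w.x), w.y);
    }

    float SqrLen(float3 v)
    {
        // squared vector length
        return dot(v, v);
    }

    float2 myrotate(float2 pt, float angle)
    {
        // rotate a 2d kernel sample by the given angle
        const float s = sin(angle);
        const float c = cos(angle);
        return float2(c * pt.x - s * pt.y, s * pt.x + c * pt.y);
    }
*/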