[2884] | 1 | #include "../shaderenv.h"
|
---|
[3227] | 2 | #include "common.h"
|
---|
[2884] | 3 |
|
---|
////////////////////
// Screen Space Ambient Occlusion shader
// based on a shader by Alexander Kusternig
| 7 |
|
---|
[3144] | 8 |
|
---|
[3106] | 9 | #define USE_EYESPACE_DEPTH 1
|
---|
[3105] | 10 |
|
---|
| 11 |
|
---|
// Interpolated input from the vertex stage.
struct fragment
{
	// screen space texture coordinate of the current pixel
	float2 texCoord: TEXCOORD0;
	// view ray through this pixel (frustum corner direction); used to
	// reconstruct the eye space position from the stored linear depth
	float3 view: TEXCOORD1;
};
| 17 |
|
---|
| 18 |
|
---|
// Output of this shader (written by main()):
// x = blended ssao intensity, y = accumulated convergence weight,
// z = squared per-pixel motion (SqrLen(diffVec)), w = eye space depth
struct pixel
{
	float4 illum_col: COLOR0;
};
|
---|
| 23 |
|
---|
// This function is inspired by the paper of shamulgaan in order
// to get a physical expression for the occlusion culling:
// the solid angle subtended by a sphere of the given radius seen
// from the given distance, i.e. 2*pi * (1 - cos(asin(radius / dist))).
// NOTE(review): assumes dist >= radius, otherwise asin gets an
// argument > 1 — confirm callers guarantee this.
inline float occlusionPower(float radius, float dist)
{
	return 6.283185307179586476925286766559f * (1.0f - cos(asin(radius / dist)));
}
|
---|
| 30 |
|
---|
| 31 |
|
---|
[3159] | 32 |
|
---|
/** Reconstructs the eye space position of a sample from its linear
	depth and texture coordinate, using the four interpolated frustum
	corner vectors (bottom-left/right, top-left/right).
*/
inline float3 ReconstructSamplePos(float eyeSpaceDepth,
								   float2 texcoord,
								   float3 bl, float3 br, float3 tl, float3 tr)
{
	// view ray through this texel, scaled by the negated depth
	const float3 viewRay = Interpol(texcoord, bl, br, tl, tr);
	return -viewRay * eyeSpaceDepth;
}
|
---|
| 43 |
|
---|
| 44 |
|
---|
[3087] | 45 |
|
---|
/** Reprojects the current pixel into the previous frame and returns
	the ssao value accumulated there as well as the convergence weight
	of that pixel: float2(old ssao, old weight).
	The weight is zero when the old sample is unusable (outside the old
	view port, or the depths disagree).
*/
inline float2 temporalSmoothing(float4 worldPos,
								float eyeSpaceDepth,
								float2 texcoord0,
								float3 oldEyePos,
								sampler2D oldTex,
								float4x4 oldModelViewProj,
								sampler2D colors,
								float3 projPos,
								float invW,
								float3 oldbl,
								float3 oldbr,
								float3 oldtl,
								float3 oldtr,
								float3 diffVec
								)
{
	// position relative to the old eye, including the translational
	// per-pixel motion of dynamic objects
	const float3 oldRelPos = diffVec - oldEyePos + worldPos.xyz;


	/////////////////
	//-- reproject into the old frame and compute the old texture position

	// note: the old model view matrix only holds the view orientation part
	float4 oldClipPos = mul(oldModelViewProj, float4(oldRelPos, 1.0f));
	oldClipPos /= oldClipPos.w;

	// map from the unit cube into the [0, 1] texture range
	const float2 oldTexCoords = oldClipPos.xy * 0.5f + 0.5f;

	// sample of the last frame: x = ssao, y = weight, w = eye space depth
	const float4 oldPixel = tex2Dlod(oldTex, float4(oldTexCoords, .0f, .0f));


	/////////////////
	//-- validate the old sample by comparing its stored depth against
	//-- the depth of the reprojected position

	// view ray towards the old sample
	const float3 oldViewVec = Interpol(oldTexCoords, oldbl, oldbr, oldtl, oldtr);
	const float invLen = 1.0f / length(oldViewVec);
	const float projectedEyeSpaceDepth = invLen * length(oldRelPos);

	const float depthDif = abs(1.0f - oldPixel.w / projectedEyeSpaceDepth);

	// reuse the old solution only if it was still valid in the old frame
	const bool oldSampleUsable =
		   (oldTexCoords.x > 0) && (oldTexCoords.x < 1.0f)
		&& (oldTexCoords.y > 0) && (oldTexCoords.y < 1.0f)
		&& (depthDif <= MIN_DEPTH_DIFF);

	// pixel valid => keep its convergence weight, otherwise restart
	const float w = oldSampleUsable ? oldPixel.y : 0.0f;

	return float2(oldPixel.x, w);
}
|
---|
| 118 |
|
---|
| 119 |
|
---|
/** The ssao shader returning an intensity value between 0 and 1.
	This version of the ssao shader uses the dot product between pixel
	and sample normal as weight.
	Returns float3(ao intensity, valid-sample measure, #samples taken).
*/
float3 ssao2(fragment IN,
			 sampler2D colors,
			 sampler2D noiseTex,
			 float2 samples[NUM_SAMPLES],
			 float3 normal,
			 float3 centerPosition,
			 float scaleFactor,
			 float3 bl,
			 float3 br,
			 float3 tl,
			 float3 tr,
			 float3 viewDir,
			 sampler2D normalTex,
			 float sampleIntensity
			 )
{
	float total_ao = .0f;
	float numSamples = .0f;
	float validSamples = .0f;

	for (int i = 0; i < NUM_SAMPLES; ++ i)
	{
		const float2 offset = samples[i];

#if 1
		////////////////////
		//-- add random noise: reflect around random normal vector (rather slow!)

		const float2 mynoise = tex2Dlod(noiseTex, float4(IN.texCoord * 4.0f, 0, 0)).xy;
		const float2 offsetTransformed = myreflect(offset, mynoise);
#else
		const float2 offsetTransformed = offset;
#endif
		// weight with projected coordinate to reach similar kernel size for near and far
		const float2 texcoord = IN.texCoord.xy + offsetTransformed * scaleFactor;

		float4 sampleColor = tex2Dlod(colors, float4(texcoord, 0, 0));

		const float3 samplePos = ReconstructSamplePos(sampleColor.w, texcoord, bl, br, tl, tr);
		// the normal of the current sample
		const float3 sampleNormal = tex2Dlod(normalTex, float4(texcoord, 0, 0)).xyz;


		////////////////
		//-- compute contribution of sample using the direction and angle

		float3 dirSample = samplePos - centerPosition;

		// clamp squared distance to avoid division blow-up for close samples
		const float sqrLen = max(SqrLen(dirSample), 1e-2f);
		const float lengthToSample = sqrt(sqrLen);

		dirSample /= lengthToSample; // normalize

		// angle between current normal and sample normal controls AO intensity
		float cosAngle = .5f + dot(sampleNormal, -normal) * 0.5f;
		// use binary decision to cull samples that are behind current shading point
		cosAngle *= step(0.0f, dot(dirSample, normal));

		const float aoContrib = sampleIntensity / sqrLen;

		total_ao += cosAngle * aoContrib;

		// check if the samples have been valid in the last frame
		validSamples += (1.0f - step(1.0f, lengthToSample)) * sampleColor.x;

		++ numSamples;
	}

	total_ao /= numSamples;

#if 1
	// if surface normal perpendicular to view dir, approx. half of the samples will not count
	// => compensate for this (on the other hand, projected sampling area could be larger!)
	const float viewCorrection = 1.0f + VIEW_CORRECTION_SCALE * max(dot(viewDir, normal), 0.0f);
	// BUGFIX: previously "total_ao += cosAngle * aoContrib * viewCorrection;"
	// which referenced the loop-local cosAngle / aoContrib outside their
	// scope; scale the accumulated value as done in ssao() instead.
	total_ao *= viewCorrection;
#endif

	return float3(max(0.0f, 1.0f - total_ao), validSamples, numSamples);
}
|
---|
| 208 |
|
---|
| 209 |
|
---|
/** The ssao shader returning an intensity value between 0 and 1.
	This version of the ssao shader uses the dotproduct between
	pixel-to-sample direction and sample normal as weight.

	The algorithm works like the following:
	1) Check in a circular area around the current position.
	2) Shoot vectors to the positions there, and check the angle to these positions.
	3) Summing up these angles gives an estimation of the occlusion at the current position.

	Returns float3(ao intensity, invalid-sample flag, #samples taken).
*/
float3 ssao(fragment IN,
			sampler2D colors,
			sampler2D noiseTex,
			float2 samples[NUM_SAMPLES],
			float3 normal,
			float3 centerPosition,
			float scaleFactor,
			float3 bl,
			float3 br,
			float3 tl,
			float3 tr,
			float3 viewDir,
			float convergence,
			float sampleIntensity,
			bool isMovingObject
			)
{
	float total_ao = .0f;
	float validSamples = .0f;
	float numSamples = .0f;

	for (int i = 0; i < NUM_SAMPLES; ++ i)
	{
		float2 offset;

		////////////////////
		//-- add random noise: reflect around random normal vector
		//-- (affects performance for some reason!)

		if (1)//convergence < 700)
		{
			// rotate the precomputed sample offset by a per-pixel random angle
			float2 mynoise = tex2Dlod(noiseTex, float4(IN.texCoord * 4.0f, 0, 0)).xy;
			//offsetTransformed = myreflect(offset, mynoise);
			offset = myrotate(samples[i], mynoise.x);
		}
		else
		{
			offset = samples[i];
		}

		// weight with projected coordinate to reach similar kernel size for near and far
		const float2 texcoord = IN.texCoord.xy + offset * scaleFactor;

		// sampleColor.w holds the eye space depth of the sample
		const float4 sampleColor = tex2Dlod(colors, float4(texcoord, .0f, .0f));
		const float3 samplePos = ReconstructSamplePos(sampleColor.w, texcoord, bl, br, tl, tr);


		////////////////
		//-- compute contribution of sample using the direction and angle

		float3 dirSample = samplePos - centerPosition;

		//const float sqrLen = max(SqrLen(dirSample), 1e-2f);
		//const float lengthToSample = sqrt(sqrLen);
		// clamp to avoid division blow-up for samples very close to the pixel
		const float lengthToSample = max(length(dirSample), 1e-2f);

		dirSample /= lengthToSample; // normalize

		// angle between current normal and direction to sample controls AO intensity.
		float cosAngle = dot(dirSample, normal);

		//const float aoContrib = sampleIntensity / sqrLen;
		// contribution falls off with distance to the sample
		const float aoContrib = sampleIntensity / lengthToSample;
		//const float aoContrib = (1.0f > lengthToSample) ? occlusionPower(9e-2f, DISTANCE_SCALE + lengthToSample): .0f;

		// only samples in front of the tangent plane occlude
		total_ao += max(cosAngle, 0) * aoContrib;

		++ numSamples;

		// check if the samples have been valid in the last frame
		// only mark sample as invalid if in the last / current frame
		// they possibly have any influence on the ao
		const float changeFactor = sampleColor.y;
		const float pixelValid = sampleColor.x;

		// we check if the sample could have been near enough to the current pixel
		// to have any influence in the current or last frame
		const float tooFarAway = step(0.5f, lengthToSample - changeFactor);
		// keep the worst (largest) invalidity measure over all samples
		validSamples = max(validSamples, (1.0f - tooFarAway) * pixelValid * step(-0.1f, cosAngle));

#ifdef USE_GTX
		// we can bail out early and use a minimal #samples)
		// if some conditions are met as long as the hardware supports it
		if (numSamples >= MIN_SAMPLES)
		{
			//break;
			// if the pixel belongs to a static object and all the samples stay valid in the current frame
			if (!isMovingObject && (validSamples < 1.0f)) break;
			// if the pixel belongs to a dynamic object but the #accumulated samples for this pixel is sufficiently high
			// (=> there was no discontinuity recently)
			else if (isMovingObject && (convergence > NUM_SAMPLES * 5)) break;
		}
#endif
	}

	// "normalize" ao contribution
	total_ao /= numSamples;

#if 1
	// if surface normal perpenticular to view dir, approx. half of the samples will not count
	// => compensate for this (on the other hand, projected sampling area could be larger!)
	const float viewCorrection = 1.0f + VIEW_CORRECTION_SCALE * max(dot(viewDir, normal), 0.0f);
	total_ao *= viewCorrection;
#endif

	//return float3(total_ao, validSamples, numSamples);
	return float3(min(1.0f, total_ao), validSamples, numSamples);
}
|
---|
| 327 |
|
---|
[3121] | 328 |
|
---|
[3150] | 329 |
|
---|
/** The mrt shader for screen space ambient occlusion.
	Computes the ssao term for the current pixel, reprojects the result
	of the previous frame and blends both according to their weights.
	Output channels: x = blended ssao, y = combined weight,
	z = squared pixel motion, w = eye space depth.
*/
pixel main(fragment IN,
		   uniform sampler2D colors,
		   uniform sampler2D normals,
		   uniform sampler2D noiseTex,
		   uniform float2 samples[NUM_SAMPLES],
		   uniform sampler2D oldTex,
		   uniform float4x4 modelViewProj,
		   uniform float4x4 oldModelViewProj,
		   uniform float temporalCoherence,
		   uniform float3 bl,
		   uniform float3 br,
		   uniform float3 tl,
		   uniform float3 tr,
		   uniform float3 oldEyePos,
		   uniform float3 oldbl,
		   uniform float3 oldbr,
		   uniform float3 oldtl,
		   uniform float3 oldtr,
		   uniform sampler2D attribsTex,
		   uniform float kernelRadius,
		   uniform float sampleIntensity
		   )
{
	pixel OUT;

	//const float3 normal = normalize(tex2Dlod(normals, float4(IN.texCoord, 0 ,0)).xyz);
	const float3 normal = tex2Dlod(normals, float4(IN.texCoord, 0 ,0)).xyz;

	// reconstruct position from the eye space depth
	const float3 viewDir = IN.view;
	const float eyeSpaceDepth = tex2Dlod(colors, float4(IN.texCoord, 0, 0)).w;
	const float4 eyeSpacePos = float4(-viewDir * eyeSpaceDepth, 1.0f);

	// per-pixel motion vector of dynamic objects
	float3 diffVec = tex2Dlod(attribsTex, float4(IN.texCoord, 0, 0)).xyz;


	////////////////
	//-- calculcate the current projected posiion (also used for next frame)

	float4 projPos = mul(modelViewProj, eyeSpacePos);
	const float invw = 1.0f / projPos.w;
	projPos *= invw;
	// shrink the sampling kernel with distance (perspective foreshortening)
	float scaleFactor = kernelRadius * invw;

	const float sqrMoveSpeed = SqrLen(diffVec);
	const bool isMovingObject = (sqrMoveSpeed > DYNAMIC_OBJECTS_THRESHOLD);


	/////////////////
	//-- compute temporal reprojection

	float2 temporalVals = temporalSmoothing(eyeSpacePos, eyeSpaceDepth, IN.texCoord, oldEyePos,
											oldTex, oldModelViewProj,
											colors,
											projPos.xyz,
											invw,
											oldbl, oldbr, oldtl, oldtr,
											diffVec
											);

	const float oldSsao = temporalVals.x;
	float oldWeight = temporalVals.y;

	float3 ao;

	// cull background note: this should be done with the stencil buffer
	if (eyeSpaceDepth < 1e10f)
	{
		ao = ssao(IN, colors, noiseTex, samples, normal, eyeSpacePos.xyz, scaleFactor, bl, br, tl, tr, normalize(viewDir), oldWeight, sampleIntensity, isMovingObject);
		//ao = ssao2(IN, colors, noiseTex, samples, normal, eyeSpacePos.xyz, scaleFactor, bl, br, tl, tr, normalize(viewDir), normals, sampleIntensity);
	}
	else
	{
		// background: fully unoccluded
		ao = float3(1.0f, 1.0f, 1.0f);
	}


	///////////
	//-- check if we have to reset pixel because one of the sample points was invalid
	//-- only do this if the current pixel does not belong to a moving object

	// the weight equals the number of sampled shot in this pass
	const float newWeight = ao.z;

	// completely reset the ao in this pixel
	const float completelyResetThres = 4.0f;
	// don't fully reset the ao in this pixel, but give low weight to old solution
	const float partlyResetThres = 1.0f;

	if (!isMovingObject)
	{
		if (ao.y > completelyResetThres)
		{
			oldWeight = .0f;
		}
		else if (ao.y > partlyResetThres)
		{
			oldWeight = min(oldWeight, 4.0f * newWeight);
			//oldWeight = .0f;
		}
	}

	//////////
	//-- blend ao between old and new samples (and avoid division by zero)

	OUT.illum_col.x = (ao.x * newWeight + oldSsao * oldWeight);// / (newWeight + oldWeight);//max(1e-6f, newWeight + oldWeight);

	// NOTE(review): no epsilon here — relies on newWeight > 0 after a
	// full sampling pass; confirm this holds for all code paths
	OUT.illum_col.x /= (newWeight + oldWeight);

	// the new weight for the next frame
	const float combinedWeight = clamp(newWeight + oldWeight, .0f, temporalCoherence);

	OUT.illum_col.y = combinedWeight;
	// can be used to check if this pixel belongs to a moving object
	OUT.illum_col.z = SqrLen(diffVec);
	OUT.illum_col.w = eyeSpaceDepth;

	//OUT.illum_col.yzw = diffVec;

	return OUT;
}
|
---|