#include "../shaderenv.h"
#include "common.h"

struct fragment
{
	// normalized screen position
	float4 pos: WPOS;
	float2 texCoord: TEXCOORD0;
	float3 view: TEXCOORD1;
};


struct pixel
{
	float4 color: COLOR0;
	float3 normal: COLOR1;
	float3 diffVal: COLOR2;
};


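// Note on the render target layout used below: COLOR0 carries the shaded color
// in rgb and the eye space depth in w, COLOR1 the surface normal, and COLOR2
// presumably the per-pixel difference vector of dynamic objects that
// PixelValid() later reads back for the temporal reprojection test.
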
/** Function for standard deferred shading.
*/
float4 shade(fragment IN,
             uniform float4 color,
             uniform float3 normal,
             float3 lightDir)
{
	// diffuse intensity
	const float angle = saturate(dot(normal, lightDir));

	float4 lightDiffuse = glstate.light[0].diffuse;
	float4 diffuse = angle * lightDiffuse;

	// light ambient term
	const float4 ambient = glstate.light[0].ambient;

	float4 outColor;

	// hack: prevent shading the sky
	if (color.w > 1e19f) outColor = color;
	else outColor = (ambient + diffuse) * color;

	return outColor;
}


/** The MRT shader for standard rendering.
*/
pixel main(fragment IN,
           uniform sampler2D colors,
           uniform sampler2D normals,
           uniform float3 lightDir
           )
{
	pixel OUT;

	float4 norm = tex2D(normals, IN.texCoord);
	float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));

	float3 normal = normalize(norm.xyz);
	float4 col = shade(IN, color, normal, lightDir);

	OUT.color = col;
	// store the eye space depth in w (the commented variant scales it by the view
	// vector length so we don't have to normalize later, e.g., for ssao)
	//OUT.color.w = color.w / length(IN.view);
	OUT.color.w = color.w;

	return OUT;
}


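/** Percentage closer filtering of the shadow map: averages the binary shadow
test over NUM_PCF_TABS jittered samples (each sample offset is rotated by a
random angle from the noise texture) and returns the weighted fraction of
samples that are lit.
*/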
float CalcShadowTerm(fragment IN,
                     uniform sampler2D shadowMap,
                     uniform float scale,
                     uniform float2 lightSpacePos,
                     uniform float depth,
                     uniform float2 samples[NUM_PCF_TABS],
                     uniform float weights[NUM_PCF_TABS],
                     uniform sampler2D noiseTexture
                     )
{
	//float shadowDepth = tex2D(shadowMap, lightSpacePos).x;
	//return step(depth, shadowDepth);

	float total_d = .0f;
	float total_w = .0f;

	for (int i = 0; i < NUM_PCF_TABS; ++i)
	{
		float2 offset;
		const float w = weights[i];

#if 1
		////////////////////
		//-- add random noise: rotate the sample offset by a random angle (warning: slow!)

		float2 mynoise = tex2D(noiseTexture, IN.texCoord * 4.0f).xy;
		//offset = myreflect(samples[i], mynoise);
		offset = myrotate(samples[i], mynoise.x);
#else
		offset = samples[i];
#endif
		// scale the offset so that near and far pixels get a similar kernel size
		float2 texcoord = lightSpacePos + offset * scale;

		float shadowDepth = tex2D(shadowMap, texcoord).x;

		total_d += w * step(depth, shadowDepth);
		total_w += w;
	}

	total_d /= total_w;

	return total_d;
}


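/** MRT shader for standard rendering with shadow mapping: reconstructs the
position from the stored eye space depth, transforms it into light space with
shadowMatrix, and attenuates the diffuse term by the PCF shadow term.
*/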
pixel main_shadow(fragment IN,
                  uniform sampler2D colors,
                  uniform sampler2D positions,
                  uniform sampler2D normals,
                  uniform sampler2D shadowMap,
                  uniform float4x4 shadowMatrix,
                  uniform float sampleWidth,
                  uniform sampler2D noiseTex,
                  uniform float2 samples[NUM_PCF_TABS],
                  uniform float weights[NUM_PCF_TABS],
                  uniform float3 lightDir,
                  uniform float3 eyePos,
                  uniform float3 bl,
                  uniform float3 br,
                  uniform float3 tl,
                  uniform float3 tr
                  )
{
	pixel OUT;

	const float3 normal = tex2D(normals, IN.texCoord.xy).xyz;
	float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));

	// reconstruct the position from the eye space depth
	float3 viewDir = IN.view;
	const float lenView = length(viewDir);
	viewDir /= lenView;

	const float eyeDepth = tex2Dlod(colors, float4(IN.texCoord, 0, 0)).w;
	const float4 worldPos = float4(eyePos - viewDir * eyeDepth, 1);

	// diffuse intensity
	const float angle = saturate(dot(normal, lightDir));
	const float4 lightDiffuse = glstate.light[0].diffuse;

	float4 diffuse = lightDiffuse * angle;

	// hack: prevent shadowing the sky
	const bool useShading = (color.w < 1e19f);

	// calculate diffuse illumination + shadow term
	if (useShading &&
	    (angle > 1e-3f) // shadow only if the diffuse term has some minimum intensity
	    )
	{
		float4 lightSpacePos = mul(shadowMatrix, worldPos);
		lightSpacePos /= lightSpacePos.w;

		float shadowTerm = CalcShadowTerm(IN, shadowMap, sampleWidth,
		                                  lightSpacePos.xy, lightSpacePos.z,
		                                  samples, weights, noiseTex);
		diffuse *= shadowTerm;
	}

	// light ambient term
	const float4 ambient = glstate.light[0].ambient;
	// compute shading
	OUT.color = useShading ? (ambient + diffuse) * color : color;
	// store the eye space depth in w (the commented variant scales it by the view
	// vector length so we don't have to normalize later, e.g., for ssao)
	//OUT.color.w = color.w / lenView;
	OUT.color.w = color.w;

	return OUT;
}


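/** Simply passes the input color buffer through to the output.
*/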
float4 Output(fragment IN, uniform sampler2D colors): COLOR
{
	return tex2Dlod(colors, float4(IN.texCoord, 0, 0));
}


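/** Divides the stored eye space depth by the length of the unnormalized view
vector, so that later passes (e.g., ssao) can reconstruct positions from the
view ray without normalizing it first.
*/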
float4 ScaleDepth(fragment IN,
                  uniform sampler2D colors): COLOR
{
	float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));
	// store the scaled depth so we don't have to normalize the view vector later (e.g., for ssao)
	color.w /= length(IN.view);

	return color;
}


/** This function computes the reprojection and checks
if the reprojected pixel from the last frame is still
valid in the current frame.
*/
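// The function returns float2(validity class, depth difference): 0 means the
// old pixel can be reused, 2 means it is only trusted with weak temporal
// coherence (to avoid noticeable dragging), and 10 means it must be discarded;
// the second component is the absolute difference between the old and the
// reprojected eye space depth.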
inline float2 PixelValid(sampler2D oldTex,
                         float4 color,
                         float3 difVec,
                         float2 texCoord,
                         float3 viewDir,
                         float3 oldEyePos,
                         float4x4 modelViewProj,
                         float4x4 oldModelViewProj,
                         float3 oldbl,
                         float3 oldbr,
                         float3 oldtl,
                         float3 oldtr
                         )
{
	// reconstruct the position from the eye space depth
	const float eyeSpaceDepth = color.w;
	const float4 worldPos = float4(-viewDir * eyeSpaceDepth, 1.0f);


	////////////////
	//-- calculate the current projected position (also used for the next frame)

	float4 projPos = mul(modelViewProj, worldPos);
	const float invw = 1.0f / projPos.w;
	projPos *= invw;

	// compute the position in the old frame (camera translation only; the
	// commented variant would also account for the motion of dynamic objects)
	//const float3 translatedPos = difVec - oldEyePos + worldPos.xyz;
	const float3 translatedPos = -oldEyePos + worldPos.xyz;


	/////////////////
	//-- reproject into the old frame and calculate the texture position of the sample in the old frame

	// note: the old model view matrix only holds the view orientation part
	float4 backProjPos = mul(oldModelViewProj, float4(translatedPos, 1.0f));
	backProjPos /= backProjPos.w;

	// fit from the unit cube into 0 .. 1
	const float2 oldTexCoords = backProjPos.xy * 0.5f + 0.5f;
	//const float2 oldTexCoords = texCoord;
	// retrieve the sample from the last frame
	const float4 oldPixel = tex2Dlod(oldTex, float4(oldTexCoords, .0f, .0f));

	// eye space depth of the sample in the old frame
	const float oldEyeSpaceDepth = oldPixel.w;

	// vector from the eye position to the old sample
	const float3 oldViewDir = Interpol(oldTexCoords, oldbl, oldbr, oldtl, oldtr);
	const float invLen = 1.0f / length(oldViewDir);
	const float projectedEyeSpaceDepth = invLen * length(translatedPos);

	const float depthDif = abs(1.0f - oldEyeSpaceDepth / projectedEyeSpaceDepth);
	const float squaredLen = SqrLen(difVec);

	// test if this pixel was valid in the old frame
	float pixelValid;

	const bool oldDynamic = (squaredLen > DYNAMIC_OBJECTS_THRESHOLD);
	const bool newDynamic = (oldPixel.z > DYNAMIC_OBJECTS_THRESHOLD);

	// 0 means the pixel is valid
	const float pixelIsValid = 0.0f;
	// only use slight temporal coherence over some frames
	// so that there is no noticeable drag
	const float pixelCouldBeValid = 2.0f;
	// this pixel information has to be discarded in order to not create artifacts
	const float pixelNotValid = 10.0f;

	if ((oldTexCoords.x < .0f) || (oldTexCoords.x >= 1.0f) ||
	    (oldTexCoords.y < .0f) || (oldTexCoords.y >= 1.0f))
	{
		// the reprojected sample lies outside the old frame
		pixelValid = pixelNotValid;
	}
	else if (//!((oldEyeSpaceDepth > 1e10f) || (projectedEyeSpaceDepth > 1e10f)) &&
	         // check if the pixel changed from dynamic to static (or vice versa)
	         ((oldDynamic && !newDynamic) || (!oldDynamic && newDynamic) ||
	          // or if we have a dynamic object and a depth discontinuity
	          ((oldDynamic || newDynamic) &&
	           //(depthDif > 1e-5f))))
	           (depthDif > MIN_DEPTH_DIFF))))
	{
		pixelValid = pixelCouldBeValid;
	}
	else
	{
		pixelValid = pixelIsValid;
	}

	return float2(pixelValid, abs(oldEyeSpaceDepth - projectedEyeSpaceDepth));
}


/** This function is called during downsampling of the buffers
for ssao. It also performs the reprojection test and packs the
result into the output color.
*/
pixel PrepareSsao(fragment IN,
                  uniform sampler2D colorsTex,
                  uniform sampler2D normalsTex,
                  uniform sampler2D diffVals,
                  uniform sampler2D oldTex,
                  uniform float4x4 modelViewProj,
                  uniform float4x4 oldModelViewProj,
                  uniform float3 oldbl,
                  uniform float3 oldbr,
                  uniform float3 oldtl,
                  uniform float3 oldtr,
                  uniform float3 oldEyePos
                  )
{
	pixel pix;
	float4 color = tex2Dlod(colorsTex, float4(IN.texCoord, 0, 0));
	// store the scaled depth so we don't have to normalize the view vector later (e.g., for ssao)
	color.w /= length(IN.view);

	const float4 difVec = tex2Dlod(diffVals, float4(IN.texCoord, 0, 0));
	// normalize the normal once more because of the bilinear interpolation
	const float3 normal = normalize(tex2Dlod(normalsTex, float4(IN.texCoord, 0, 0)).xyz);

	// do the reprojection and filter out the pixels that are not safe to reuse
	float2 pValid = PixelValid(oldTex,
	                           color,
	                           difVec.xyz,
	                           IN.texCoord,
	                           IN.view,
	                           oldEyePos,
	                           modelViewProj,
	                           oldModelViewProj,
	                           oldbl, oldbr, oldtl, oldtr
	                           );

	// pack the validity, the depth difference, and the scaled depth into the color target
	pix.color = color;
	pix.color.xy = pValid.xy;
	pix.color.z = color.w;

	pix.normal = normal;

	return pix;
}


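/** Downsamples the color buffer; the actual filtering is left to the hardware
bilinear filter (downSampleOffs is currently unused).
*/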
float4 DownSample(fragment IN,
                  uniform sampler2D colors,
                  uniform float2 downSampleOffs[NUM_DOWNSAMPLES]): COLOR
{
	// let bilinear filtering do its work
	float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));
	return color;
}