#include "../shaderenv.h"
#include "common.h"


struct fragment
{
    // normalized screen position
    float4 pos: WPOS;
    float2 texCoord: TEXCOORD0;
    float3 view: TEXCOORD1;
};


struct pixel
{
    float4 color: COLOR0;
    float3 normal: COLOR1;
    float3 diffVal: COLOR2;
};
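
// Note on the MRT layout (added description, inferred from the shaders below): COLOR0 holds
// the shaded color with the eye space depth in its w component, COLOR1 the surface normal,
// and COLOR2 (diffVal) presumably a per-pixel difference vector for dynamic objects that
// PixelValid() uses for its reprojection test.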


/** Function for standard deferred shading.
*/
float4 shade(fragment IN,
             uniform float4 color,
             uniform float3 normal,
             float3 lightDir)
{
    // diffuse intensity
    const float angle = saturate(dot(normal, lightDir));

    float4 lightDiffuse = glstate.light[0].diffuse;
    float4 diffuse = angle * lightDiffuse;

    // ambient term of the light
    const float4 ambient = glstate.light[0].ambient;

    float4 outColor;

    // hack: prevent shading the sky
    if (color.w > 1e19f) outColor = color;
    else outColor = (ambient + diffuse) * color;

    return outColor;
}



/** The MRT shader for standard rendering.
*/
pixel main(fragment IN,
           uniform sampler2D colors,
           uniform sampler2D normals,
           uniform float3 lightDir
           )
{
    pixel OUT;

    float4 norm = tex2D(normals, IN.texCoord);
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));

    float3 normal = normalize(norm.xyz);
    float4 col = shade(IN, color, normal, lightDir);

    OUT.color = col;
    // store scaled view vector so we don't have to normalize for, e.g., ssao
    //OUT.color.w = color.w / length(IN.view);
    OUT.color.w = color.w;

    return OUT;
}


float CalcShadowTerm(fragment IN,
                     uniform sampler2D shadowMap,
                     uniform float scale,
                     uniform float2 lightSpacePos,
                     uniform float depth,
                     uniform float2 samples[NUM_PCF_TABS],
                     uniform float weights[NUM_PCF_TABS],
                     uniform sampler2D noiseTexture
                     )
{
    //float shadowDepth = tex2D(shadowMap, lightSpacePos).x;
    //return step(depth, shadowDepth);

    float total_d = .0f;
    float total_w = .0f;

    for (int i = 0; i < NUM_PCF_TABS; ++ i)
    {
        const float2 offset = samples[i];
        const float w = weights[i];

#if 1
        ////////////////////
        //-- add random noise: reflect around random normal vector (warning: slow!)

        float2 mynoise = tex2D(noiseTexture, IN.texCoord).xy;
        const float2 offsetTransformed = myreflect(offset, mynoise);
#else
        const float2 offsetTransformed = offset;
#endif
        // weight with the projected coordinate to get a similar kernel size for near and far
        float2 texcoord = lightSpacePos + offsetTransformed * scale;

        float shadowDepth = tex2D(shadowMap, texcoord).x;

        total_d += w * step(depth, shadowDepth);
        total_w += w;
    }

    total_d /= (float)total_w;

    return total_d;
}
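

/** Sketch (added for illustration, not part of the original pipeline): the randomized PCF
    loop above reflects each sample offset about a per-pixel random vector via myreflect()
    from common.h. Assuming myreflect() is a standard 2D reflection, a hypothetical version
    would look roughly like this:
*/
float2 MyReflectSketch(float2 pt, float2 n)
{
    // reflect pt about the direction n (n is assumed to be normalized)
    return pt - 2.0f * dot(pt, n) * n;
}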


pixel main_shadow(fragment IN,
                  uniform sampler2D colors,
                  uniform sampler2D positions,
                  uniform sampler2D normals,
                  uniform sampler2D shadowMap,
                  uniform float4x4 shadowMatrix,
                  uniform float sampleWidth,
                  uniform sampler2D noiseTex,
                  uniform float2 samples[NUM_PCF_TABS],
                  uniform float weights[NUM_PCF_TABS],
                  uniform float3 lightDir,
                  uniform float3 eyePos,
                  uniform float3 bl,
                  uniform float3 br,
                  uniform float3 tl,
                  uniform float3 tr
                  )
{
    pixel OUT;

    const float3 normal = tex2D(normals, IN.texCoord.xy).xyz;
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));

    /// reconstruct position from the eye space depth
    float3 viewDir = IN.view;
    const float lenView = length(viewDir);
    viewDir /= lenView;

    const float eyeDepth = tex2Dlod(colors, float4(IN.texCoord, 0, 0)).w;
    const float4 worldPos = float4(eyePos - viewDir * eyeDepth, 1);

    // diffuse intensity
    const float angle = saturate(dot(normal, lightDir));
    const float4 lightDiffuse = glstate.light[0].diffuse;

    float4 diffuse = lightDiffuse * angle;

    // hack: prevent shadowing the sky
    const bool useShading = (color.w < 1e19f);

    // calc diffuse illumination + shadow term
    if (useShading &&
        (angle > 1e-3f) // shadow only if the diffuse term has some minimum intensity
        )
    {
        float4 lightSpacePos = mul(shadowMatrix, worldPos);
        lightSpacePos /= lightSpacePos.w;

        float shadowTerm = CalcShadowTerm(IN, shadowMap, sampleWidth, lightSpacePos.xy, lightSpacePos.z, samples, weights, noiseTex);
        diffuse *= shadowTerm;
    }

    // light ambient term
    const float4 ambient = glstate.light[0].ambient;
    // compute shading
    OUT.color = useShading ? (ambient + diffuse) * color : color;
    // store scaled view vector from now on so we don't have to normalize later (e.g., for ssao)
    //OUT.color.w = color.w / lenView;
    OUT.color.w = color.w;

    return OUT;
}
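
// Note (assumption, not stated in the original code): lightSpacePos.xy above is used directly
// as the shadow map texture coordinate and lightSpacePos.z as the comparison depth, so
// shadowMatrix presumably already contains the 0.5 scale / bias that maps light clip space
// from [-1, 1] into [0, 1] texture space.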


float4 Output(fragment IN, uniform sampler2D colors): COLOR
{
    return tex2Dlod(colors, float4(IN.texCoord, 0, 0));
}


float4 ScaleDepth(fragment IN,
                  uniform sampler2D colors): COLOR
{
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));
    // scale the depth by the inverse view vector length so we don't have to normalize for, e.g., ssao
    color.w /= length(IN.view);

    return color;
}
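

/** Worked example (added for clarity, hypothetical helper): with ScaleDepth() storing
    w = eyeSpaceDepth / length(view), a later pass such as ssao can reconstruct the position
    from the unnormalized view ray without a normalize or a division:
*/
float3 ReconstructPosSketch(float3 eyePos, float3 view, float scaledDepth)
{
    // eyePos - view * (eyeSpaceDepth / |view|) == eyePos - normalize(view) * eyeSpaceDepth
    return eyePos - view * scaledDepth;
}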


/** This shader computes the reprojection and checks
    if the reprojected pixel from the last frame is still
    valid in the current frame.
*/
inline float2 PixelValid(sampler2D oldTex,
                         float4 color,
                         float3 difVec,
                         float2 texCoord,
                         float3 viewDir,
                         float3 oldEyePos,
                         float4x4 modelViewProj,
                         float4x4 oldModelViewProj,
                         float3 oldbl,
                         float3 oldbr,
                         float3 oldtl,
                         float3 oldtr
                         )
{
    // reconstruct position from the eye space depth
    const float eyeSpaceDepth = color.w;
    const float4 worldPos = float4(-viewDir * eyeSpaceDepth, 1.0f);



    ////////////////
    //-- calculate the current projected position (also used for the next frame)

    float4 projPos = mul(modelViewProj, worldPos);
    const float invw = 1.0f / projPos.w;
    projPos *= invw;

    // compute position from the old frame for dynamic objects + translational portion
    const float3 translatedPos = difVec - oldEyePos + worldPos.xyz;


    /////////////////
    //-- reproject into the old frame and calculate the texture position of the sample in the old frame

    // note: the old model view matrix only holds the view orientation part
    float4 backProjPos = mul(oldModelViewProj, float4(translatedPos, 1.0f));
    backProjPos /= backProjPos.w;

    // fit from the unit cube into 0 .. 1
    const float2 oldTexCoords = backProjPos.xy * 0.5f + 0.5f;
    //const float2 oldTexCoords = texCoord;
    // retrieve the sample from the last frame
    const float4 oldPixel = tex2Dlod(oldTex, float4(oldTexCoords, .0f, .0f));

    // the eye space depth of the sample in the old frame
    const float oldEyeSpaceDepth = oldPixel.w;

    // vector from the eye position to the old sample
    const float3 oldViewDir = Interpol(oldTexCoords, oldbl, oldbr, oldtl, oldtr);
    const float invLen = 1.0f / length(oldViewDir);
    const float projectedEyeSpaceDepth = invLen * length(translatedPos);

    const float depthDif = abs(1.0f - oldEyeSpaceDepth / projectedEyeSpaceDepth);
    const float squaredLen = SqrLen(difVec);

    // test if this pixel was valid in the old frame
    float pixelValid;

    const bool oldDynamic = (squaredLen > DYNAMIC_OBJECTS_THRESHOLD);
    const bool newDynamic = (oldPixel.z > DYNAMIC_OBJECTS_THRESHOLD);

    //const float xOffs = 1.0f / 1024.0f; const float yOffs = 1.0f / 768.0f;
    //const float eps = 1e-6f;

    // 0 means the pixel is valid
    const float pixelIsValid = 0.0f;
    // means that we only use slight temporal coherence over some frames
    // so that there is no noticeable drag
    const float pixelCouldBeValid = 2.0f;
    // this pixel information has to be discarded in order not to create artifacts
    const float pixelNotValid = 10.0f;

    if ((oldTexCoords.x < .0f) || (oldTexCoords.x >= 1.0f) ||
        (oldTexCoords.y < .0f) || (oldTexCoords.y >= 1.0f))
    {
        pixelValid = pixelNotValid;
    }
    else
    if (
        // check if the pixel changed from a dynamic to a non-dynamic object (or vice versa)
        (oldDynamic && !newDynamic) || (!oldDynamic && newDynamic) ||
        // check if we have a dynamic object and there is a depth discontinuity
        ((oldDynamic || newDynamic) && (depthDif <= MIN_DEPTH_DIFF)))
    {
        pixelValid = pixelCouldBeValid;
    }
    else
    {
        pixelValid = pixelIsValid;
    }

    return float2(pixelValid, abs(oldEyeSpaceDepth - projectedEyeSpaceDepth));
}
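

/** Sketch (added for illustration, hypothetical name): Interpol() from common.h is used above
    to rebuild the old frame's view ray from its four frustum corner directions (bottom-left,
    bottom-right, top-left, top-right). Assuming it is a plain bilinear interpolation over the
    texture coordinate, it would look roughly like this:
*/
float3 InterpolSketch(float2 w, float3 bl, float3 br, float3 tl, float3 tr)
{
    // interpolate along x on the bottom and top edges, then along y between the two results
    const float3 bottom = lerp(bl, br, w.x);
    const float3 top = lerp(tl, tr, w.x);

    return lerp(bottom, top, w.y);
}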


/** This function is called during downsampling of the buffers
    for ssao.
*/
pixel PrepareSsao(fragment IN,
                  uniform sampler2D colorsTex,
                  uniform sampler2D normalsTex,
                  uniform sampler2D diffVals,
                  uniform sampler2D oldTex,
                  uniform float4x4 modelViewProj,
                  uniform float4x4 oldModelViewProj,
                  uniform float3 oldbl,
                  uniform float3 oldbr,
                  uniform float3 oldtl,
                  uniform float3 oldtr,
                  uniform float3 oldEyePos
                  )
{
    pixel pix;
    float4 color = tex2Dlod(colorsTex, float4(IN.texCoord, 0, 0));
    // scale the depth by the inverse view vector length so we don't have to normalize for, e.g., ssao
    color.w /= length(IN.view);

    const float4 difVec = tex2Dlod(diffVals, float4(IN.texCoord, 0, 0));
    const float3 normal = normalize(tex2Dlod(normalsTex, float4(IN.texCoord, 0, 0)).xyz);

    // do the reprojection and filter out the pixels that are not safe
    float2 pValid = PixelValid(oldTex,
                               color,
                               difVec.xyz,
                               IN.texCoord,
                               IN.view,
                               oldEyePos,
                               modelViewProj,
                               oldModelViewProj,
                               oldbl, oldbr, oldtl, oldtr
                               );

    pix.color = color;
    // store the reprojection validity and the depth difference in the first two channels
    pix.color.xy = pValid.xy;
    pix.normal = normal;

    return pix;
}


float4 DownSample(fragment IN,
                  uniform sampler2D colors,
                  uniform float2 downSampleOffs[NUM_DOWNSAMPLES]): COLOR
{
    // let bilinear filtering do its work
    float4 color = tex2Dlod(colors, float4(IN.texCoord, 0, 0));
    return color;
}
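

/** Sketch (added for illustration, not used by the pipeline): DownSample() above ignores
    downSampleOffs and relies on bilinear filtering. If manual filtering were needed instead,
    the offsets could drive a simple box filter, e.g.:
*/
float4 DownSampleManualSketch(fragment IN,
                              uniform sampler2D colors,
                              uniform float2 downSampleOffs[NUM_DOWNSAMPLES]): COLOR
{
    float4 color = float4(.0f, .0f, .0f, .0f);

    // average the neighbouring samples given by the precomputed offsets
    for (int i = 0; i < NUM_DOWNSAMPLES; ++ i)
    {
        color += tex2Dlod(colors, float4(IN.texCoord + downSampleOffs[i], 0, 0));
    }

    return color / (float)NUM_DOWNSAMPLES;
}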