// input
struct vtxin
{
    float4 position: POSITION;
    float3 normal:   NORMAL;
    float4 color:    COLOR0;
    float4 texCoord: TEXCOORD0;
};

// vtx output
struct vtxout
{
    float4 position: POSITION;  // eye space
    float4 texCoord: TEXCOORD0;
    float4 color:    COLOR0;
    float4 worldPos: TEXCOORD1; // world position
    float3 normal:   TEXCOORD2;
    float4 projPos:  TEXCOORD3;
};

// fragment input
struct fragin
{
    float4 color:    COLOR0;
    float4 position: POSITION;  // eye space
    float4 texCoord: TEXCOORD0;
    float4 winPos:   WPOS;
    float4 worldPos: TEXCOORD1; // world position
    float3 normal:   TEXCOORD2;
    float4 projPos:  TEXCOORD3;
};

struct pixel
{
    float4 col:  COLOR0;
    float3 norm: COLOR1;
    float3 pos:  COLOR2;
};

#pragma position_invariant vtx

vtxout vtx(vtxin IN,
           const uniform float4x4 ModelViewProj,
           uniform float4x4 ModelView)
{
    vtxout OUT;

    OUT.color = IN.color;
    OUT.texCoord = IN.texCoord;

    //OUT.worldPos = mul(glstate.matrix.inverse.projection, OUT.position);
    OUT.worldPos = mul(ModelView, IN.position);

    // transform the vertex position into clip space
    OUT.position = mul(glstate.matrix.mvp, IN.position);

    OUT.normal = IN.normal;
    OUT.projPos = OUT.position;

    return OUT;
}

// bilinear interpolation
inline float3 Interpol(float2 w, float3 bl, float3 br, float3 tl, float3 tr)
{
    float3 x1 = lerp(bl, tl, w.y);
    float3 x2 = lerp(br, tr, w.y);
    float3 v  = lerp(x1, x2, w.x);

    return v;
}

//#pragma position_invariant fragtex

pixel fragtex(fragin IN,
              uniform sampler2D dirtTex,
              uniform sampler2D tex,
              uniform float3 eyePos,
              uniform float3 bl,
              uniform float3 br,
              uniform float3 tl,
              uniform float3 tr)
{
    float4 texColor = tex2D(tex, IN.texCoord.xy);

    // account for alpha blending
    if (texColor.w < 0.5f)
        discard;

    pixel pix;

    // save color in first render target
    // hack: use combination of emissive + diffuse (emissive used as constant ambient term)
    pix.col = (glstate.material.emission + glstate.material.diffuse) * texColor;

    // save world space normal in rt
    pix.norm = IN.normal;

    // hack: squeeze some information about ambient into the texture
    //pix.col.w = glstate.material.emission.x;

    // compute eye linear depth
    const float4 projPos = IN.projPos / IN.projPos.w;
    float2 screenCoord = projPos.xy * 0.5f + 0.5f;

    const float3 viewVec = Interpol(screenCoord, bl, br, tl, tr);
    const float invMagView = 1.0f / length(viewVec);

    // note: has to be done in this order, otherwise strange precision problems!
    pix.col.w = invMagView * length(eyePos - IN.worldPos.xyz);

    return pix;
}

pixel frag(fragin IN,
           uniform float3 eyePos,
           uniform float3 bl,
           uniform float3 br,
           uniform float3 tl,
           uniform float3 tr)
{
    pixel pix;

    // hack: use combination of emissive + diffuse (emissive used as constant ambient term)
    pix.col = glstate.material.diffuse + glstate.material.emission;

    pix.norm = IN.normal;

    // hack: squeeze some information about the ambient term into the target
    //pix.col.w = glstate.material.emission.x;

    // compute eye linear depth, scaled by the view vector length to avoid a square root in the pixel shader
    const float4 projPos = IN.projPos / IN.projPos.w;
    float2 screenCoord = projPos.xy * 0.5f + 0.5f;

    const float magView = length(Interpol(screenCoord, bl, br, tl, tr));

    pix.col.w = length(eyePos - IN.worldPos.xyz) / magView;

    return pix;
}
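
/*
 * Illustrative sketch (not part of the original shaders above): pix.col.w
 * stores distance(eyePos, worldPos) divided by |viewVec|, where viewVec is
 * the frustum corner ray interpolated at the pixel via Interpol(). A later
 * deferred pass that draws a full-screen quad carrying the same corner rays
 * could therefore recover the world position with a single multiply-add and
 * no normalize. The function, parameter, and sampler names below are
 * assumptions for illustration, not identifiers from this file.
 */
float3 ReconstructWorldPos(float2 texCoord,
                           float3 viewVec,   // interpolated frustum corner ray for this pixel
                           float3 eyePos,
                           sampler2D colorTex)
{
    // w = |worldPos - eyePos| / |viewVec|, as written by fragtex()/frag() above
    const float w = tex2D(colorTex, texCoord).w;

    // worldPos = eyePos + w * viewVec (viewVec already points through the pixel)
    return eyePos + w * viewVec;
}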