// vertex input
struct vtxin
{
    float4 position: POSITION;
    float4 normal:   NORMAL;
    float4 color:    COLOR0;
    float4 texCoord: TEXCOORD0;
};

// vertex output
struct vtxout
{
    float4 position: POSITION;
    float4 texCoord: TEXCOORD0;
    float4 color:    COLOR0;
    // eye position
    float4 eyePos:      TEXCOORD1;
    float4 normal:      TEXCOORD2;
    float4 worldPos:    TEXCOORD3;
    float4 oldWorldPos: TEXCOORD4;
};

// fragment input
struct fragin
{
    float4 color:    COLOR0;
    float4 texCoord: TEXCOORD0;
    float4 winPos:   WPOS;
    // eye position
    float4 eyePos:      TEXCOORD1;
    float4 normal:      TEXCOORD2;
    float4 worldPos:    TEXCOORD3;
    float4 oldWorldPos: TEXCOORD4;
};

// multiple render target output
struct pixel
{
    float4 col:     COLOR0;
    float3 norm:    COLOR1;
    float3 offsVec: COLOR2;
};

#pragma position_invariant vtx

vtxout vtx(vtxin IN,
           uniform float4x4 viewMatrix,
           uniform float4x4 modelMatrix,
           uniform float4x4 oldModelMatrix)
{
    vtxout OUT;

    OUT.color    = IN.color;
    OUT.texCoord = IN.texCoord;

    // transform the vertex position into eye space
    OUT.eyePos = mul(glstate.matrix.modelview[0], IN.position);
    // transform the vertex position into post-projection space
    OUT.position = mul(glstate.matrix.mvp, IN.position);

    // transform the current vertex position into world space
    OUT.worldPos = mul(modelMatrix, IN.position);
    // transform the old vertex position into world space
    OUT.oldWorldPos = mul(oldModelMatrix, IN.position);

    // the normal has to be correctly transformed with the inverse transpose
    OUT.normal = mul(glstate.matrix.invtrans.modelview[0], IN.normal);
    //OUT.normal = IN.normal;

    return OUT;
}

pixel fragtex(fragin IN,
              uniform sampler2D tex: TEXUNIT0,
              uniform float4x4 viewMatrix)
{
    float4 texColor = tex2D(tex, IN.texCoord.xy);

    // account for alpha blending
    if (texColor.w < .5f)
        discard;

    pixel pix;

    // save the color in the first render target
    // hack: use a combination of emissive + diffuse (emissive used as constant ambient term)
    pix.col = (glstate.material.emission + glstate.material.diffuse) * texColor;

    // save the world-space normal in the render target => transform back into world space
    // by multiplying with the inverse view matrix. Since transforming a normal with T means
    // multiplying with the inverse transpose of T, we multiply with
    // Transp(Inv(Inv(view))) = Transp(view)
    pix.norm = normalize(mul(transpose(viewMatrix), IN.normal).xyz);
    //pix.norm = IN.normal.xyz;

    // compute eye-linear depth
    pix.col.w = length(IN.eyePos.xyz);

    // the scene entity id
    //pix.id = glstate.fog.color.xyz;

    // the offset to the world position from the old frame
    //pix.offsVec = float3(0,0,0);
    pix.offsVec = IN.oldWorldPos.xyz - IN.worldPos.xyz;

    return pix;
}

pixel frag(fragin IN, uniform float4x4 viewMatrix)
{
    pixel pix;

    // hack: use a combination of emissive + diffuse (emissive used as constant ambient term)
    pix.col = glstate.material.diffuse + glstate.material.emission;

    // save the world-space normal in the render target => transform back into world space
    // by multiplying with the inverse view matrix. Since transforming a normal with T means
    // multiplying with the inverse transpose of T, we multiply with
    // Transp(Inv(Inv(view))) = Transp(view)
    pix.norm = normalize(mul(transpose(viewMatrix), IN.normal).xyz);
    //pix.norm = IN.normal.xyz;

    // eye-space depth
    pix.col.w = length(IN.eyePos.xyz);

    // the scene entity id
    //pix.id = glstate.fog.color.xyz;

    // the offset to the world position from the old frame
    pix.offsVec = IN.oldWorldPos.xyz - IN.worldPos.xyz;
    //pix.offsVec = float3(0,0,0);

    return pix;
}
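The fragment programs write the "pixel" struct to three color attachments: COLOR0 carries the material color with eye-linear depth in the alpha channel, COLOR1 the world-space normal, and COLOR2 the world-space offset to the previous frame's position. The following host-side C++ sketch shows one way such a G-buffer could be bound with the EXT_framebuffer_object API; it is not taken from the original renderer, and all names (createGBuffer, colorTex, normalTex, offsetTex, depthRb) are illustrative assumptions.

// Minimal sketch (assumption, not the original code): attach one texture per
// COLOR semantic used by the "pixel" struct and enable all three draw buffers.
#include <GL/glew.h>

GLuint createGBuffer(GLuint colorTex, GLuint normalTex, GLuint offsetTex, GLuint depthRb)
{
    GLuint fbo;
    glGenFramebuffersEXT(1, &fbo);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);

    // COLOR0: material color + eye-linear depth in alpha (pix.col)
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
                              GL_TEXTURE_2D, colorTex, 0);
    // COLOR1: world-space normal (pix.norm)
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT1_EXT,
                              GL_TEXTURE_2D, normalTex, 0);
    // COLOR2: world-space offset to the old frame position (pix.offsVec)
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT2_EXT,
                              GL_TEXTURE_2D, offsetTex, 0);
    // depth buffer for the G-buffer pass
    glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
                                 GL_RENDERBUFFER_EXT, depthRb);

    // route the three COLOR outputs of the fragment program to the attachments
    const GLenum buffers[] = {
        GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT, GL_COLOR_ATTACHMENT2_EXT
    };
    glDrawBuffers(3, buffers);

    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
    return fbo;
}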