multiple lights with shadows

Hi,
I'm trying to create a scene with more than one light source, with shadows.
I know that for the shadows I need a depth map for each light, but I don't know whether I need to compute all the lights in the shader.

I have this shader for one light:
vert:


varying vec4 vPixToLightTBN[1];	// vector from the current pixel to the light
varying vec3 vPixToEyeTBN;		// vector from the current pixel to the eye
varying vec3 vVertexMV;
varying vec3 vNormalMV;
varying vec3 vPixToLightMV;
varying vec3 vLightDirMV;

// SHADOW MAPPING //
uniform int enable_shadow_mapping;
////////////////////


#define MODE_PHONG		0
#define MODE_BUMP		1
#define MODE_PARALLAX	2
#define MODE_RELIEF		3
uniform int mode;

#define LIGHT_DIRECTIONAL		0.0
#define LIGHT_OMNIDIRECTIONAL	1.0
#define LIGHT_SPOT				2.0
				
void main(void)
{

	gl_Position = ftransform();
	gl_TexCoord[0] = gl_MultiTexCoord0;
	
	vec3 vTangent = gl_MultiTexCoord1.xyz;
	vec3 n = normalize(gl_NormalMatrix * gl_Normal);
	vec3 t = normalize(gl_NormalMatrix * vTangent);
	vec3 b = cross(n, t);
	
	vNormalMV = n;
	
	vec4 vLightPosMV = gl_LightSource[0].position;		// light position (or direction) in eye space
	vVertexMV = vec3(gl_ModelViewMatrix * gl_Vertex);	// vertex position in eye space
	
	vec3 tmpVec;



	if(vLightPosMV.w == LIGHT_DIRECTIONAL)
		tmpVec = -vLightPosMV.xyz;					// directional light
	else
		tmpVec = vLightPosMV.xyz - vVertexMV.xyz;	// point light

	vPixToLightMV = tmpVec;
/*
	if(mode == MODE_PHONG)
	{
		vPixToLightTBN[0].xyz = tmpVec.xyz;
		vPixToLightTBN[0].w = vLightPosMV.w;	
		
		vPixToEyeTBN = -vVertexMV;
	}
	else*/
	{
		
		// light position or direction in TBN space
		vPixToLightTBN[0].x = dot(tmpVec, t);
		vPixToLightTBN[0].y = dot(tmpVec, b);
		vPixToLightTBN[0].z = dot(tmpVec, n);
		vPixToLightTBN[0].w = vLightPosMV.w;	// point or directional

		// view vector in TBN space
		tmpVec = -vVertexMV;
		vPixToEyeTBN.x = dot(tmpVec, t);
		vPixToEyeTBN.y = dot(tmpVec, b);
		vPixToEyeTBN.z = dot(tmpVec, n);
	}
	
	
	
	if(length(gl_LightSource[0].spotDirection) > 0.001)
	{
		// spot light
		vLightDirMV = normalize(gl_LightSource[0].spotDirection);
		vPixToLightTBN[0].w = LIGHT_SPOT;
	}
	else
	{
		// not a spot light
		vLightDirMV = gl_LightSource[0].spotDirection;
	}
	
	if(enable_shadow_mapping != 0) {
		// pos has been transformed by the model and the camera
		vec4 pos = gl_ModelViewMatrix * gl_Vertex;
		// multiply by the inverse camera matrix: only the model transforms remain
		pos = gl_TextureMatrix[0] * pos;
		// multiply by the light's matrix: vertex position in the light's frame
		gl_TexCoord[1] = gl_TextureMatrix[1] * pos;
	}
}

frag:


varying vec4 vPixToLightTBN[1];	// vector from the current pixel to the light
varying vec3 vPixToEyeTBN;		// vector from the current pixel to the eye
varying vec3 vVertexMV;
varying vec3 vNormalMV;
varying vec3 vPixToLightMV;
varying vec3 vLightDirMV;

uniform sampler2D texDiffuse;
uniform sampler2D texNormalHeightMap;

#define MODE_PHONG		0
#define MODE_BUMP		1
#define MODE_PARALLAX	2
#define MODE_RELIEF		3

uniform int mode;
uniform float parallax_factor;
uniform float relief_factor;
uniform float tile_factor;


// SHADOW MAPPING //
uniform int depth_map_size;
uniform int enable_shadow_mapping;	// 0->no  1->shadow mapping  2->shadow mapping + projected texture
uniform sampler2DShadow texDepthMapFromLight;
uniform sampler2D texDiffuseProjected;
#define Z_TEST_SIGMA 0.0001
////////////////////

#define LIGHT_DIRECTIONAL		0.0
#define LIGHT_OMNIDIRECTIONAL	1.0
#define LIGHT_SPOT				2.0

float ShadowMapping(vec4 vVertexFromLightView, out vec3 vPixPosInDepthMap);
vec4  Phong(vec2 uv, vec3 vNormalTBN, vec3 vEyeTBN, vec4 vLightTBN);
vec4  NormalMapping(vec2 uv, vec3 vPixToEyeTBN, vec4 vPixToLightTBN, bool bParallax);
float ReliefMapping_RayIntersection(in vec2 A, in vec2 AB);
vec4  ReliefMapping(vec2 uv);





void main (void)
{
	gl_FragDepth = gl_FragCoord.z;
	vec4 vPixToLightTBNcurrent = vPixToLightTBN[0];
	
	vec4 color = vec4(1.0, 0.0, 0.0, 1.0);


	if(mode == MODE_PHONG)
		color = Phong(gl_TexCoord[0].st*tile_factor, vec3(0.0, 0.0, 1.0), vPixToEyeTBN, vPixToLightTBNcurrent);

	else if(mode == MODE_RELIEF)
		color = ReliefMapping(gl_TexCoord[0].st*tile_factor);
		
	else if(mode == MODE_BUMP)
		color = NormalMapping(gl_TexCoord[0].st*tile_factor, vPixToEyeTBN, vPixToLightTBNcurrent, false);
	
	else if(mode == MODE_PARALLAX)
		color = NormalMapping(gl_TexCoord[0].st*tile_factor, vPixToEyeTBN, vPixToLightTBNcurrent, true);


	gl_FragColor = color;

}







float ReliefMapping_RayIntersection(in vec2 A, in vec2 AB)
{
	const int num_steps_lin = 10;
	const int num_steps_bin = 15;
	
	float linear_step = 1.0 / (float(num_steps_lin));
	float depth = 0.0; // current depth position
	
	// best match found (starts with the last position, 1.0)
	float best_depth = 1.0;
	float step = linear_step;
	
	// linear search from front to back for the first point inside the object
	for(int i=0; i<num_steps_lin-1; i++){
		depth += step;
		float h = 1.0 - texture2D(texNormalHeightMap, A+AB*depth).a;
		
		if (depth >= h) { 
			best_depth = depth; 
			i = num_steps_lin-1;
		}
		
	}
	
	
	// the intersection lies between (depth) and (depth - step),
	// so start the refinement at (depth - step/2)
	step = linear_step/2.0;
	depth = best_depth - step;
	
	// binary search
	for(int i=0; i<num_steps_bin; i++)
	{
		float h = 1.0 - texture2D(texNormalHeightMap, A+AB*depth).a;
		
		step /= 2.0;
		if (depth >= h) {
			best_depth = depth;
			depth -= step;
		}
		else {
			best_depth = depth;
			depth += step;
		}
	}
	
	return best_depth;
}


vec4 ReliefMapping(vec2 uv)
{	
	vec4 vPixToLightTBNcurrent = vPixToLightTBN[0];
	vec3 viewVecTBN = normalize(vPixToEyeTBN);
	

	vec2 A = uv;
	vec2 AB = relief_factor * vec2(-viewVecTBN.x, viewVecTBN.y)/viewVecTBN.z;

	float h = ReliefMapping_RayIntersection(A, AB);
	
	vec2 uv_offset = h * AB;
	

	vec3 p = vVertexMV;
	vec3 v = normalize(p);
		

	p += v*h*viewVecTBN.z;	
	
	
	float near = 0.1;
	float far = 800.0;	
	vec2 planes;
	planes.x = -far/(far-near);
	planes.y = -far*near/(far-near);
	gl_FragDepth =((planes.x*p.z+planes.y)/-p.z);
	


	return NormalMapping(uv+uv_offset, vPixToEyeTBN, vPixToLightTBNcurrent, false);
}





vec4 NormalMapping(vec2 uv, vec3 vPixToEyeTBN, vec4 vPixToLightTBN, bool bParallax)
{	
	vec3 lightVecTBN = normalize(vPixToLightTBN.xyz);
	vec3 viewVecTBN = normalize(vPixToEyeTBN);

	vec2 vTexCoord = uv;
	if(bParallax) {			

		float height = texture2D(texNormalHeightMap, uv).a;
		vTexCoord = uv + ((height-0.5)* parallax_factor * (vec2(viewVecTBN.x, -viewVecTBN.y)/viewVecTBN.z));
	}
	

	vec3 normalTBN = normalize(texture2D(texNormalHeightMap, vTexCoord).xyz * 2.0 - 1.0);
	
	return Phong(vTexCoord, normalTBN, vPixToEyeTBN, vPixToLightTBN);
}






vec4 Phong(vec2 uv, vec3 vNormalTBN, vec3 vEyeTBN, vec4 vLightTBN)
{

	float att = 1.0;
	if(vLightTBN.w != LIGHT_DIRECTIONAL) {
		float dist = length(vLightTBN.xyz);
		att = 1.0/(gl_LightSource[0].constantAttenuation + gl_LightSource[0].linearAttenuation*dist + gl_LightSource[0].quadraticAttenuation*dist*dist);
		att = max(att, 0.0);	
	}
	
	vec3 L = normalize(vLightTBN.xyz);
	vec3 N = normalize(vNormalTBN.xyz);
	vec3 V = normalize(vEyeTBN.xyz);
	
	

	vec4 base = texture2D(texDiffuse, uv);	// diffuse color

	float iDiffuse = max(dot(L, N), 0.0);	// diffuse intensity
	float iSpecular = pow(clamp(dot(reflect(-L, N), V), 0.0, 1.0), gl_FrontMaterial.shininess );
	
	
	vec4 cAmbient = gl_LightSource[0].ambient * gl_FrontMaterial.ambient;
	vec4 cDiffuse = gl_LightSource[0].diffuse * gl_FrontMaterial.diffuse * iDiffuse;	
	vec4 cSpecular = gl_LightSource[0].specular * gl_FrontMaterial.specular * iSpecular;	
		
	

	// if this is a SPOT light
	if(vLightTBN.w > 1.5)
	{
		// handle the spot cone
		if(dot(normalize(vPixToLightMV.xyz), normalize(-vLightDirMV.xyz)) < gl_LightSource[0].spotCosCutoff)
		{
			cDiffuse = vec4(0.0, 0.0, 0.0, 1.0);
			cSpecular = vec4(0.0, 0.0, 0.0, 1.0);
		}
		else
		{

			if(enable_shadow_mapping != 0) {
				vec3 vPixPosInDepthMap;
				float shadow = ShadowMapping(gl_TexCoord[1], vPixPosInDepthMap);
				cDiffuse = (shadow) * cDiffuse;
				cSpecular = (shadow) * cSpecular;
				

				if(enable_shadow_mapping == 2) {
					vec4 cProjected = texture2D(texDiffuseProjected, vec2(vPixPosInDepthMap.s, 1.0-vPixPosInDepthMap.t));
					base.xyz = mix(base.xyz, cProjected.xyz, shadow/2.0);
				}

			}
		}
	}

	else
	{

	}


	vec4 color = cAmbient * base + (cDiffuse * base + cSpecular) * att;
	color.a = base.a;
	
	return color;	
}

float ShadowMapping(vec4 vVertexFromLightView, out vec3 vPixPosInDepthMap)
{
	float fShadow = 0.0;
						
	vec2 tOffset[3*3];
	tOffset[0] = vec2(-1.0, -1.0); tOffset[1] = vec2(0.0, -1.0); tOffset[2] = vec2(1.0, -1.0);
	tOffset[3] = vec2(-1.0,  0.0); tOffset[4] = vec2(0.0,  0.0); tOffset[5] = vec2(1.0,  0.0);
	tOffset[6] = vec2(-1.0,  1.0); tOffset[7] = vec2(0.0,  1.0); tOffset[8] = vec2(1.0,  1.0);


	vPixPosInDepthMap = vVertexFromLightView.xyz/vVertexFromLightView.w;	// perspective divide
	vPixPosInDepthMap = (vPixPosInDepthMap + 1.0) * 0.5;					// from [-1, 1] to [0, 1]
	
	
	vec4 vDepthMapColor = shadow2D(texDepthMapFromLight, vPixPosInDepthMap);

	if((vDepthMapColor.z+Z_TEST_SIGMA) < vPixPosInDepthMap.z)
	{
		fShadow = 0.0;
		
		
		// soft shadows for nearby fragments only
		if( length(vVertexMV.xyz) < 12.0 )
		{
			for(int i=0; i<9; i++)
			{
				vec2 offset = tOffset[i] / (float(depth_map_size));
				// depth map sample at the offset position
				vec4 vDepthMapColor = shadow2D(texDepthMapFromLight, vPixPosInDepthMap + vec3(offset.s, offset.t, 0.0));
		
				if((vDepthMapColor.z+Z_TEST_SIGMA) < vPixPosInDepthMap.z) {
					fShadow += 0.0;
				}
				else {
					fShadow += 1.0 / 9.0;
				}
			}
		}
	}
	else
	{
		fShadow = 1.0;
	}

	fShadow = clamp(fShadow, 0.0, 1.0);
	return fShadow;
}

Now, to handle multiple lights, I changed the relevant variables to arrays and compute each light, but it doesn't work. Does anyone know how to solve it?

TIA

Here is my shader for multiple lights (it doesn't work):
vert:




varying vec4 vPixToLightTBN[8];	// vector from the current pixel to the light
varying vec3 vPixToEyeTBN[8];	// vector from the current pixel to the eye
varying vec3 vVertexMV;
varying vec3 vNormalMV;
varying vec3 vPixToLightMV[8];
varying vec3 vLightDirMV[8];

// SHADOW MAPPING //
uniform int enable_shadow_mapping;
////////////////////


#define MODE_PHONG		0
#define MODE_BUMP		1
#define MODE_PARALLAX	2
#define MODE_RELIEF		3
uniform int mode;

#define LIGHT_DIRECTIONAL		0.0
#define LIGHT_OMNIDIRECTIONAL	1.0
#define LIGHT_SPOT				2.0
				
void main(void)
{

	gl_Position = ftransform();
	gl_TexCoord[0] = gl_MultiTexCoord0;
	
	vec3 vTangent = gl_MultiTexCoord1.xyz;
	vec3 n = normalize(gl_NormalMatrix * gl_Normal);
	vec3 t = normalize(gl_NormalMatrix * vTangent);
	vec3 b = cross(n, t);
	
	vNormalMV = n;
	
	vec4 vLightPosMV[8];
	vLightPosMV[0] = gl_LightSource[0].position;		// light position (or direction) in eye space
	vLightPosMV[1] = gl_LightSource[1].position;
	vVertexMV = vec3(gl_ModelViewMatrix * gl_Vertex);	// vertex position in eye space
	
	vec3 tmpVec[8];


for (int i=0;i<2;i++)
{
	if(vLightPosMV[i].w == LIGHT_DIRECTIONAL)
		tmpVec[i] = -vLightPosMV[i].xyz;					// directional light
	else
		tmpVec[i] = vLightPosMV[i].xyz - vVertexMV.xyz;	// point light

	vPixToLightMV[i] = tmpVec[i];
/*
	if(mode == MODE_PHONG)
	{
		vPixToLightTBN[i].xyz = tmpVec[i].xyz;
		vPixToLightTBN[i].w = vLightPosMV[i].w;	// point or directional
		
		vPixToEyeTBN[i] = -vVertexMV;
	}
	else*/
	{
		// light position or direction in TBN space
		vPixToLightTBN[i].x = dot(tmpVec[i], t);
		vPixToLightTBN[i].y = dot(tmpVec[i], b);
		vPixToLightTBN[i].z = dot(tmpVec[i], n);
		vPixToLightTBN[i].w = vLightPosMV[i].w;	// point or directional
			
		// view vector in TBN space
		tmpVec[i] = -vVertexMV;
		vPixToEyeTBN[i].x = dot(tmpVec[i], t);
		vPixToEyeTBN[i].y = dot(tmpVec[i], b);
		vPixToEyeTBN[i].z = dot(tmpVec[i], n);
	}
	
	
	
	if(length(gl_LightSource[i].spotDirection) > 0.001)
	{
		// spot light
		vLightDirMV[i] = normalize(gl_LightSource[i].spotDirection);
		vPixToLightTBN[i].w = LIGHT_SPOT;
	}
	else
	{
		// not a spot light
		vLightDirMV[i] = gl_LightSource[i].spotDirection;
	}
}
	if(enable_shadow_mapping != 0) {
		// pos has been transformed by the model and the camera
		vec4 pos = gl_ModelViewMatrix * gl_Vertex;
		// multiply by the inverse camera matrix: only the model transforms remain
		pos = gl_TextureMatrix[0] * pos;
		// multiply by the light's matrix: vertex position in the light's frame
		gl_TexCoord[1] = gl_TextureMatrix[1] * pos;
	}
}

frag:




varying vec4 vPixToLightTBN[8];	// vector from the current pixel to the light
varying vec3 vPixToEyeTBN[8];	// vector from the current pixel to the eye
varying vec3 vVertexMV;
varying vec3 vNormalMV;
varying vec3 vPixToLightMV[8];
varying vec3 vLightDirMV[8];
int	 curr;
uniform sampler2D texDiffuse;
uniform sampler2D texNormalHeightMap;

#define MODE_PHONG		0
#define MODE_BUMP		1
#define MODE_PARALLAX	2
#define MODE_RELIEF		3

uniform int mode;
uniform float parallax_factor;
uniform float relief_factor;
uniform float tile_factor;


// SHADOW MAPPING //
uniform int depth_map_size;
uniform int enable_shadow_mapping;	// 0->no  1->shadow mapping  2->shadow mapping + projected texture
uniform sampler2DShadow texDepthMapFromLight;
uniform sampler2DShadow texDepthMapFromLight2;
uniform sampler2D texDiffuseProjected;
#define Z_TEST_SIGMA 0.0001
////////////////////

#define LIGHT_DIRECTIONAL		0.0
#define LIGHT_OMNIDIRECTIONAL	1.0
#define LIGHT_SPOT				2.0

float ShadowMapping(vec4 vVertexFromLightView, out vec3 vPixPosInDepthMap);
vec4  Phong(vec2 uv, vec3 vNormalTBN, vec3 vEyeTBN, vec4 vLightTBN);
vec4  NormalMapping(vec2 uv, vec3 nPixToEyeTBN, vec4 nPixToLightTBN, bool bParallax);
float ReliefMapping_RayIntersection(in vec2 A, in vec2 AB);
vec4  ReliefMapping(vec2 uv);





void main (void)
{
	gl_FragDepth = gl_FragCoord.z;
	vec4 vPixToLightTBNcurrent;
	
	vec4 color[8]; 
	curr = 0;
	
	color[curr] = vec4(1.0, 0.0, 0.0, 1.0);
	vPixToLightTBNcurrent = vPixToLightTBN[curr];
	if(mode == MODE_PHONG)
		color[curr] = Phong(gl_TexCoord[0].st*tile_factor, vec3(0.0, 0.0, 1.0), vPixToEyeTBN[curr], vPixToLightTBNcurrent);

	else if(mode == MODE_RELIEF)
		color[curr] = ReliefMapping(gl_TexCoord[0].st*tile_factor);
		
	else if(mode == MODE_BUMP)
		color[curr] = NormalMapping(gl_TexCoord[0].st*tile_factor, vPixToEyeTBN[curr], vPixToLightTBNcurrent, false);
	
	else if(mode == MODE_PARALLAX)
		color[curr] = NormalMapping(gl_TexCoord[0].st*tile_factor, vPixToEyeTBN[curr], vPixToLightTBNcurrent, true);


	gl_FragColor = color[curr];

}







float ReliefMapping_RayIntersection(in vec2 A, in vec2 AB)
{
	const int num_steps_lin = 10;
	const int num_steps_bin = 15;
	
	float linear_step = 1.0 / (float(num_steps_lin));
	float depth = 0.0; // current depth position
	
	// best match found (starts with last position 1.0)
	float best_depth = 1.0;
	float step = linear_step;
	
	// search from front to back for first point inside the object
	for(int i=0; i<num_steps_lin-1; i++){
		depth += step;
		float h = 1.0 - texture2D(texNormalHeightMap, A+AB*depth).a;
		
		if (depth >= h) { // the ray is inside the heightmap
			best_depth = depth; // store best depth
			i = num_steps_lin-1;
		}
		
	}
	
	
	// the intersection lies between (depth) and (depth - step),
	// so start the refinement at (depth - step/2)
	step = linear_step/2.0;
	depth = best_depth - step;
	
	// binary search
	for(int i=0; i<num_steps_bin; i++)
	{
		float h = 1.0 - texture2D(texNormalHeightMap, A+AB*depth).a;
		
		step /= 2.0;
		if (depth >= h) {
			best_depth = depth;
			depth -= step;
		}
		else {
			best_depth = depth;
			depth += step;
		}
	}
	
	return best_depth;
}


vec4 ReliefMapping(vec2 uv)
{	
	vec4 vPixToLightTBNcurrent = vPixToLightTBN[curr];
	vec3 viewVecTBN = normalize(vPixToEyeTBN[curr]);
	
	// size and start position of search in texture space
	vec2 A = uv;
	vec2 AB = relief_factor * vec2(-viewVecTBN.x, viewVecTBN.y)/viewVecTBN.z;

	float h = ReliefMapping_RayIntersection(A, AB);
	
	vec2 uv_offset = h * AB;
	

	vec3 p = vVertexMV;
	vec3 v = normalize(p);
		
	// displace the eye-space position along the view ray for the corrected depth below
	p += v*h*viewVecTBN.z;	
	
	
	float near = 0.1;
	float far = 800.0;	
	vec2 planes;
	planes.x = -far/(far-near);
	planes.y = -far*near/(far-near);
	gl_FragDepth =((planes.x*p.z+planes.y)/-p.z);
	


	return NormalMapping(uv+uv_offset, vPixToEyeTBN[curr], vPixToLightTBNcurrent, false);
}





vec4 NormalMapping(vec2 uv, vec3 nPixToEyeTBN, vec4 nPixToLightTBN, bool bParallax)
{	
	vec3 lightVecTBN = normalize(nPixToLightTBN.xyz);
	vec3 viewVecTBN = normalize(nPixToEyeTBN);

	vec2 vTexCoord = uv;
	if(bParallax) {			
		// calculate offset with scale & bias
		float height = texture2D(texNormalHeightMap, uv).a;
		vTexCoord = uv + ((height-0.5)* parallax_factor * (vec2(viewVecTBN.x, -viewVecTBN.y)/viewVecTBN.z));
	}
	
	// fetch the perturbed normal in TBN space
	vec3 normalTBN = normalize(texture2D(texNormalHeightMap, vTexCoord).xyz * 2.0 - 1.0);
	
//// LIGHTING:
	return Phong(vTexCoord, normalTBN, nPixToEyeTBN, nPixToLightTBN);
}






vec4 Phong(vec2 uv, vec3 vNormalTBN, vec3 vEyeTBN, vec4 vLightTBN)
{

	float att = 1.0;
	if(vLightTBN.w != LIGHT_DIRECTIONAL) {
		float dist = length(vLightTBN.xyz);
		att = 1.0/(gl_LightSource[curr].constantAttenuation + gl_LightSource[curr].linearAttenuation*dist + gl_LightSource[curr].quadraticAttenuation*dist*dist);
		att = max(att, 0.0);	
	}
	
	vec3 L = normalize(vLightTBN.xyz);
	vec3 N = normalize(vNormalTBN.xyz);
	vec3 V = normalize(vEyeTBN.xyz);
	
	
//// LIGHTING:
	vec4 base = texture2D(texDiffuse, uv);	// diffuse color

	float iDiffuse = max(dot(L, N), 0.0);	// diffuse intensity
	float iSpecular = pow(clamp(dot(reflect(-L, N), V), 0.0, 1.0), gl_FrontMaterial.shininess );
	
	
	vec4 cAmbient = gl_LightSource[curr].ambient * gl_FrontMaterial.ambient;
	vec4 cDiffuse = gl_LightSource[curr].diffuse * gl_FrontMaterial.diffuse * iDiffuse;	
	vec4 cSpecular = gl_LightSource[curr].specular * gl_FrontMaterial.specular * iSpecular;	
		
	
	// if this is a SPOT light
	if(vLightTBN.w > 1.5)
	{
		// handle the spot cone
		if(dot(normalize(vPixToLightMV[curr].xyz), normalize(-vLightDirMV[curr].xyz)) < gl_LightSource[curr].spotCosCutoff)
		{
			cDiffuse = vec4(0.0, 0.0, 0.0, 1.0);
			cSpecular = vec4(0.0, 0.0, 0.0, 1.0);
		}
		else
		{
			// Shadow mapping :
			if(enable_shadow_mapping != 0) {
				vec3 vPixPosInDepthMap;
				float shadow = ShadowMapping(gl_TexCoord[1], vPixPosInDepthMap);
				cDiffuse = (shadow) * cDiffuse;
				cSpecular = (shadow) * cSpecular;
				
				// Texture projection :
				if(enable_shadow_mapping == 2) {
					vec4 cProjected = texture2D(texDiffuseProjected, vec2(vPixPosInDepthMap.s, 1.0-vPixPosInDepthMap.t));
					base.xyz = mix(base.xyz, cProjected.xyz, shadow/2.0);
				}

			}
		}
	}
	// if this is not a SPOT light
	else
	{

	}


	vec4 color = cAmbient * base + (cDiffuse * base + cSpecular) * att;
	color.a = base.a;
	
	return color;	
}

float ShadowMapping(vec4 vVertexFromLightView, out vec3 vPixPosInDepthMap)
{
	float fShadow = 0.0;
						
	vec2 tOffset[3*3];
	tOffset[0] = vec2(-1.0, -1.0); tOffset[1] = vec2(0.0, -1.0); tOffset[2] = vec2(1.0, -1.0);
	tOffset[3] = vec2(-1.0,  0.0); tOffset[4] = vec2(0.0,  0.0); tOffset[5] = vec2(1.0,  0.0);
	tOffset[6] = vec2(-1.0,  1.0); tOffset[7] = vec2(0.0,  1.0); tOffset[8] = vec2(1.0,  1.0);


	vPixPosInDepthMap = vVertexFromLightView.xyz/vVertexFromLightView.w;	// perspective divide
	vPixPosInDepthMap = (vPixPosInDepthMap + 1.0) * 0.5;					// from [-1, 1] to [0, 1]
	
	
	vec4 vDepthMapColor = shadow2D(texDepthMapFromLight, vPixPosInDepthMap);
	//vDepthMapColor += shadow2D(texDepthMapFromLight2, vPixPosInDepthMap);
	if((vDepthMapColor.z+Z_TEST_SIGMA) < vPixPosInDepthMap.z)
	{
		fShadow = 0.0;
		
		// soft shadows for nearby fragments only
		if( length(vVertexMV.xyz) < 12.0 )
		{
			for(int i=0; i<9; i++)
			{
				vec2 offset = tOffset[i] / (float(depth_map_size));
				// depth map sample at the offset position
				vec4 vDepthMapColor = shadow2D(texDepthMapFromLight, vPixPosInDepthMap + vec3(offset.s, offset.t, 0.0));
				//vDepthMapColor += shadow2D(texDepthMapFromLight2, vPixPosInDepthMap + vec3(offset.s, offset.t, 0.0));
		
				if((vDepthMapColor.z+Z_TEST_SIGMA) < vPixPosInDepthMap.z) {
					fShadow += 0.0;
				}
				else {
					fShadow += 1.0 / 9.0;
				}
			}
		}
	}
	else
	{
		fShadow = 1.0;
	}

	fShadow = clamp(fShadow, 0.0, 1.0);
	return fShadow;
}
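
For reference, here is a rough sketch of what I think main() needs to do to actually use both lights: loop over them and add up the contributions. This is only a sketch using the declarations above; it assumes the driver accepts a non-constant index into gl_LightSource in the fragment shader, and each shadow-casting light would still need its own depth map and texture coordinate, which the code above doesn't have yet.

#define NUM_LIGHTS 2

void main (void)
{
	gl_FragDepth = gl_FragCoord.z;

	vec2 uv = gl_TexCoord[0].st * tile_factor;
	vec4 color = vec4(0.0);

	for(int i=0; i<NUM_LIGHTS; i++)
	{
		curr = i;	// light index used by Phong() / ReliefMapping() via gl_LightSource[curr]
		vec4 vPixToLightTBNcurrent = vPixToLightTBN[i];

		vec4 c;
		if(mode == MODE_PHONG)
			c = Phong(uv, vec3(0.0, 0.0, 1.0), vPixToEyeTBN[i], vPixToLightTBNcurrent);
		else if(mode == MODE_RELIEF)
			c = ReliefMapping(uv);
		else if(mode == MODE_BUMP)
			c = NormalMapping(uv, vPixToEyeTBN[i], vPixToLightTBNcurrent, false);
		else
			c = NormalMapping(uv, vPixToEyeTBN[i], vPixToLightTBNcurrent, true);

		// note: the ambient term ends up added once per light this way
		color += c;
	}

	color.a = 1.0;
	gl_FragColor = color;
}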


And sorry for my bad English :slight_smile:

Never mind, I solved it.

…and what did you do to solve it?

PS. You make use of vTangent but I don’t see that defined as a varying or an attribute. Is this the complete shader or have you missed that?

It's possible that I missed something…
The problem is that I can't use more than 3 lights because of the varying variable limit…
Now I see I need at least 7 lights, so I'm looking for another way, maybe multi-pass rendering.
If you want the shader, I can post it.

Depending on your “situation” (minimum graphics card requirement, resolution, AA settings, avg batch size, shader efficiency, and how much time you’ve got to do this render), you may be able to just brute-force multipass it. That is, render the scene N times, each time blending (adding) on the lighting for one additional light. Of course you can use < N passes if you apply multiple lights per pass. Even so, this can get expensive. However, this is quick and easy to try, assuming you’re doing gamma-correct rendering. One of the biggest things here is to make sure you’re adding light in linear light space not gamma-corrected light space.
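
To illustrate the linear-light point, here's a minimal sketch of what one per-light pass could output. The names are made up for the example, and it assumes the host side enables additive blending for every pass after the first (glEnable(GL_BLEND); glBlendFunc(GL_ONE, GL_ONE)):

// Per-light pass fragment shader (sketch, one light per pass).
uniform sampler2D texDiffuse;	// stored gamma-encoded (sRGB-ish)
varying vec2 vUV;
varying vec3 vN;				// eye-space normal
varying vec3 vL;				// eye-space vector to the light

void main(void)
{
	// decode the texture to linear light space before lighting
	vec3 albedo = pow(texture2D(texDiffuse, vUV).rgb, vec3(2.2));

	float nDotL = max(dot(normalize(vN), normalize(vL)), 0.0);
	vec3 linearColor = albedo * gl_LightSource[0].diffuse.rgb * nDotL;

	// output stays linear; the summed result of all passes is gamma-encoded
	// in a final full-screen pass with pow(color, vec3(1.0/2.2))
	gl_FragColor = vec4(linearColor, 1.0);
}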

To reduce the expense, you can cull the scene per light and then, for each pass, only submit the objects that may be lit by the light being blended on. This can reduce the amount of useless work involved, particularly if your batches are tiny and lights don't overlap.

But it’s typical for batches to be large nowadays for efficiency. So a more general solution is to do Deferred Rendering, where you render your opaque scene once into a framebuffer, and then come back and apply lights directly to the pixels rather than sending the scene objects down the pipe again (and maybe again, and again…). It’s not without issues of its own, though; transparency and AA, for example, need special handling. There are several variations of Deferred Rendering, such as Deferred Shading, Deferred Lighting, and Light-indexed Deferred Rendering.
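
To give a flavour of Deferred Shading, the lighting pass over a full-screen quad could look roughly like this. The G-buffer texture names and layout here are just an assumption for the sketch, not a fixed convention:

// Deferred-shading lighting pass (sketch), one light per pass, blended additively.
uniform sampler2D texGBufferAlbedo;		// rgb = albedo
uniform sampler2D texGBufferNormal;		// rgb = eye-space normal * 0.5 + 0.5
uniform sampler2D texGBufferPosition;	// rgb = eye-space position
uniform vec3 lightPosMV;				// light position in eye space
uniform vec3 lightColor;

void main(void)
{
	vec2 uv = gl_TexCoord[0].st;	// full-screen quad UV

	vec3 albedo = texture2D(texGBufferAlbedo, uv).rgb;
	vec3 N = normalize(texture2D(texGBufferNormal, uv).rgb * 2.0 - 1.0);
	vec3 P = texture2D(texGBufferPosition, uv).rgb;

	vec3 L = normalize(lightPosMV - P);
	float nDotL = max(dot(N, L), 0.0);

	// this pass only writes the contribution of one light;
	// the passes are summed with additive blending (GL_ONE, GL_ONE)
	gl_FragColor = vec4(albedo * lightColor * nDotL, 1.0);
}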

You know your problem domain best, so you need to decide, or provide more info on it so we can help you out with more detailed pros/cons.

I render the scene once per light into a framebuffer, and then with a different shader I composite all the lights to the screen.
It works, and for 2 lights (with shadow and parallax mapping) I get more than 60 fps, which is enough for me for now.
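
In case it helps anyone else, the composite pass is basically just an additive sum of the per-light renders; roughly something like this (the texture uniform names are placeholders for the per-light render targets):

// Composite pass (sketch): sum the per-light framebuffer textures on a full-screen quad.
uniform sampler2D texLightPass0;
uniform sampler2D texLightPass1;

void main(void)
{
	vec2 uv = gl_TexCoord[0].st;
	vec4 c0 = texture2D(texLightPass0, uv);
	vec4 c1 = texture2D(texLightPass1, uv);

	// straight sum of the two lighting passes
	gl_FragColor = vec4(c0.rgb + c1.rgb, 1.0);
}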