I have made a shader pass that displays points reacting to the proximity of the scene's objects.
When I pass the points, their coordinates end up relative to the camera (i.e. pos: (0, 0, 1) sits 1 meter away from the camera and follows it as it moves). Subtracting the camera's position only partly fixes the problem. Is there a matrix I should apply to a world position to place it correctly in the scene?

```glsl
scene0 = inverse(modelMatrix) * vec4(.0, .0, .0, 1.0); // applying the matrices doesn't seem to affect the position
test = scene0.xyz - cameraWorldPos;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
```
In a post-processing or custom shader pass, you often have only the camera's `projectionMatrix` and `modelViewMatrix` from a particular render pass, which reference the currently drawn geometry. If you want to convert a world-space coordinate into the correct clip-space or view-space position:

- Get the world-space point:

```glsl
vec4 worldPos = vec4(someWorldCoord, 1.0);
```

- View space: multiply by the camera's `viewMatrix` (which is effectively the inverse of `cameraWorldMatrix`):

```glsl
vec4 viewPos = viewMatrix * worldPos;
```

- Clip space: multiply by the camera's `projectionMatrix`:

```glsl
vec4 clipPos = projectionMatrix * viewPos;
gl_Position = clipPos;
```

If you only have `modelViewMatrix` and `cameraWorldPos`, you need the full `modelMatrix` of the relevant object. Then do:

```glsl
vec4 viewPos = modelViewMatrix * vec4(localPos, 1.0);
```

But for a raw world position not tied to a single mesh, you should multiply by `viewMatrix` (or the inverse of the camera's world matrix), then by `projectionMatrix`. Subtracting `cameraWorldPos` alone isn't enough because you also need the camera's orientation.
Thank you for clarifying. I have tried, in the vertex shader,

```glsl
scene0 = vec4(.0, .0, 1.0, 1.0);
vec4 viewPos = inverse(cameraWorldMatrix) * scene0;
scene0 = projectionMatrix * viewPos;
test = scene0.xyz;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
```
and
```glsl
scene0 = vec4(.0, .0, 1.0, 1.0);
vec4 viewPos = viewMatrix * scene0;
scene0 = modelViewMatrix * viewPos;
test = scene0.xyz - cameraWorldPos;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
```
The problem remains: the point displays slightly off depending on the camera orientation. Maybe it comes from the calculated distance in the fragment shader?
The fragment shader (which I got from a previous question here: WorldPosition in shader - #2 by HEOJUNFO) is:
```glsl
float depth = getLinearDepth( vUv );
float fragDepth = texture2D(tDepth, vUv).r;
float viewZ = perspectiveDepthToViewZ(fragDepth, cameraNear, cameraFar);
vec4 viewPos = getViewPosition(vUv, fragDepth);
vec4 worldPos = cameraWorldMatrix * viewPos;

// HEATMAP
float intensity = distance(test.xyz, worldPos.xyz);
```
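For debugging, the same reconstruction can be cross-checked on the CPU with `Vector3.unproject()`. A sketch, where `uv` and `depth` are hypothetical values read back from the depth texture for the pixel being checked:

```js
import * as THREE from "three";

// uv (0..1 screen coords) and depth (raw 0..1 sample from tDepth)
// are assumed inputs for the pixel under test.
const ndc = new THREE.Vector3(uv.x * 2 - 1, uv.y * 2 - 1, depth * 2 - 1);

// unproject() applies projectionMatrixInverse, then the camera's matrixWorld,
// i.e. the same steps as getViewPosition() + cameraWorldMatrix in the shader.
const worldPos = ndc.unproject(camera);
console.log(worldPos);
```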
I gpt’ed this up for ya:
```js
import * as THREE from "three";
import { Pass, FullScreenQuad } from "three/addons/postprocessing/Pass.js";

export class WorldPositionReconstructPass extends Pass {

  constructor(camera) {
    super();
    this.camera = camera;

    this.material = new THREE.ShaderMaterial({
      uniforms: {
        tDiffuse: { value: null },
        tDepth: { value: null },
        projectionMatrixInverse: { value: new THREE.Matrix4() },
        viewMatrixInverse: { value: new THREE.Matrix4() }
      },
      vertexShader: `
        varying vec2 vUv;
        void main() {
          vUv = uv;
          gl_Position = vec4(position, 1.);
        }`,
      fragmentShader: `
        varying vec2 vUv;
        uniform sampler2D tDiffuse, tDepth;
        uniform mat4 projectionMatrixInverse, viewMatrixInverse;

        // Reconstruct the world-space position of the fragment from the depth buffer.
        vec3 getWorldPosition(vec2 uv) {
          float z = texture2D(tDepth, uv).r * 2. - 1.;
          vec4 clipPos = vec4(uv * 2. - 1., z, 1.);
          vec4 viewPos = projectionMatrixInverse * clipPos;
          viewPos /= viewPos.w;
          return (viewMatrixInverse * viewPos).xyz;
        }

        // Estimate a world-space normal from neighboring depth samples.
        vec3 estimateWorldNormal(vec2 uv) {
          vec2 r = vec2(textureSize(tDepth, 0)), t = 1. / r;
          vec3 c = getWorldPosition(uv),
               l = getWorldPosition(uv - vec2(t.x, 0.)),
               u = getWorldPosition(uv + vec2(0., t.y));
          return normalize(cross(l - c, u - c));
        }

        void main() {
          vec3 wPos = getWorldPosition(vUv);
          vec3 col = texture2D(tDiffuse, vUv).rgb * fract(wPos / 100.);
          // vec3 n = estimateWorldNormal(vUv); // example normal
          gl_FragColor = vec4(col, 1.);
        }`
    });

    this.fsQuad = new FullScreenQuad(this.material);
  }

  render(renderer, writeBuffer, readBuffer /*, deltaTime, maskActive */) {
    this.material.uniforms.tDiffuse.value = readBuffer.texture;
    this.material.uniforms.tDepth.value = readBuffer.depthTexture;
    this.material.uniforms.projectionMatrixInverse.value
      .copy(this.camera.projectionMatrix)
      .invert();
    this.material.uniforms.viewMatrixInverse.value.copy(this.camera.matrixWorld);

    const oldAutoClear = renderer.autoClear;
    renderer.autoClear = false;

    if (this.renderToScreen) {
      renderer.setRenderTarget(null);
    } else {
      renderer.setRenderTarget(writeBuffer);
    }

    renderer.clear(false, true, false);
    this.fsQuad.render(renderer);
    renderer.autoClear = oldAutoClear;
  }
}
```
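One assumption worth flagging: the pass reads `readBuffer.depthTexture`, so it only works if the composer's render target carries a `DepthTexture`. A minimal setup sketch, where `renderer`, `scene`, and `camera` are assumed to come from your app:

```js
import * as THREE from "three";
import { EffectComposer } from "three/addons/postprocessing/EffectComposer.js";
import { RenderPass } from "three/addons/postprocessing/RenderPass.js";

// Render target with a depth texture attached, so passes can sample scene depth.
const size = renderer.getSize(new THREE.Vector2());
const target = new THREE.WebGLRenderTarget(size.x, size.y, {
  depthTexture: new THREE.DepthTexture(size.x, size.y),
});

const composer = new EffectComposer(renderer, target);
composer.addPass(new RenderPass(scene, camera));
composer.addPass(new WorldPositionReconstructPass(camera));
```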
This is the same algorithm as I am using. The issue here is with plain world positions: somehow they don't transform properly.
From your screenshot it looks like you're doing some kind of highlight based on the world-space distance to the controls.target?
You mentioned you want it to be based on the distance to the camera itself?
Is your camera a child of the scene?
three.js doesn't require the camera to be a child of the scene, but if it isn't, updating some of its parameters isn't triggered automatically.
Actually it works! I am still trying to see where my mistake was and I'll post my code, sorry! And thank GPT for me!
So I was using the wrong space, transforming the world point into viewPos, mainly in the function `getWorldPosition(vec2 uv)`. I also simplified to `gl_Position = vec4( position, 1.0 );` (not sure why it was different). Here is my code, in case it helps!
```js
vertexShader: `
  varying vec2 vUv;
  varying vec4 scene0;

  void main() {
    vUv = uv;
    scene0 = vec4(.0, .0, 1.0, 1.0);
    vec4 viewPos = viewMatrix * scene0; // unused: scene0 is compared in world space in the fragment shader
    gl_Position = vec4( position, 1.0 ); // was: projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
  }`,

fragmentShader: /* glsl */ `
  uniform sampler2D tDepth;
  uniform float cameraNear;
  uniform float cameraFar;
  uniform mat4 cameraProjectionMatrix;
  uniform mat4 cameraWorldMatrix;

  varying vec2 vUv;
  varying vec4 scene0;

  #include <packing>

  //_____________WORLD POSITION
  vec3 getWorldPosition(vec2 uv) {
    float z = texture2D(tDepth, uv).r * 2. - 1.;
    vec4 clipPos = vec4(uv * 2. - 1., z, 1.);
    vec4 viewPos = inverse(cameraProjectionMatrix) * clipPos;
    viewPos /= viewPos.w;
    return (cameraWorldMatrix * viewPos).xyz;
  }

  //_____________DEPTH
  float getLinearDepth( const in vec2 screenPosition ) {
    #if PERSPECTIVE_CAMERA == 1
      float fragCoordZ = texture2D( tDepth, screenPosition ).x;
      float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
      return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
    #else
      return texture2D( tDepth, screenPosition ).x;
    #endif
  }

  //_____________GRADIENT
  vec3 makeRainbow(float value) {
    // considering the gradient values [0:RED, .5:YELLOW, 1:BLUE]
    float r = 0.0; //[1.0;1.0;0.0]
    float g = 0.0; //[1.0;1.0;0.0]
    float b = 0.0; //[.0;.0;1.0]

    // CENTER GRADIENT
    if (value < .5) {
      r = 1.0;
      g = mix(0.0, 1.0, value * 2.0);
      b = 0.0;
    }
    // RED CENTER (unreachable: value < .2 is already caught by the first branch)
    else if (value < .2) { r = 1.0; g = 0.0; b = 0.0; }
    // OUTER GRADIENT
    else if (value > .5) {
      r = mix(1.0, .0, (value - .5) * 3.0);
      g = mix(1.0, .0, (value - .5) * 2.5);
      b = mix(.0, 2.5, (value - .5) * 2.0);
    }
    return vec3(r, g, b);
  }

  //_____________MAIN
  void main() {
    vec3 worldPos = getWorldPosition(vUv);

    // HEAT
    float intensity = distance(scene0.xyz, worldPos.xyz);

    // gradient color
    vec3 gradCol = makeRainbow(intensity);
    float alpha = (1.0 - intensity) * (1.0 - intensity);
    if (intensity < 1.0) {
      gl_FragColor = vec4( gradCol, alpha );
    }
  }`,
```
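And for anyone wiring this up: the uniforms and the PERSPECTIVE_CAMERA define this shader expects can be supplied roughly like so (a sketch; `depthTarget` is a hypothetical render target with a depth texture attached):

```js
import * as THREE from "three";

const heatmapMaterial = new THREE.ShaderMaterial({
  defines: { PERSPECTIVE_CAMERA: 1 },
  uniforms: {
    tDepth: { value: depthTarget.depthTexture }, // depthTarget is assumed
    cameraNear: { value: camera.near },
    cameraFar: { value: camera.far },
    // Matrix4 uniforms are held by reference, so they stay current as the camera moves.
    cameraProjectionMatrix: { value: camera.projectionMatrix },
    cameraWorldMatrix: { value: camera.matrixWorld },
  },
  vertexShader,   // the vertex shader above
  fragmentShader, // the fragment shader above
  transparent: true, // the heatmap outputs a varying alpha
});
```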