How to get canvas as a texture to 'chain' together shaders

Hello,
I’m trying to figure out a way to bind the current canvas to a THREE.Texture. The idea is to apply multiple shader effects, feeding the previously rendered canvas back in as a framebuffer input to the next shader in the chain (similar to this reference link, ‘PhotoMosh’). I tried something like what I’ve included below, but I get a warning:

WebGL warning: drawArraysInstanced: Texture level 0 would be read by TEXTURE_2D unit 0, but written by framebuffer attachment COLOR_ATTACHMENT0, which would be illegal feedback.

Any ideas?

const ISFRenderer = require('../dist/build.js').Renderer; // ISF shader implementation

let video = null; // webcam 

init();

async function loadFile(src, callback) {
  const response = await fetch('examples/' + src);
  const body = await response.text();

  callback(body);
}

function init() {

  console.log("successful load!");

  // video //

  video = document.createElement('video');
  video.autoplay = true;

  navigator.mediaDevices.getUserMedia({
    video: true
  }).then(function (stream) {
    video.srcObject = stream;
  });

  // canvas //

  const container = document.createElement('div');
  const canvas = document.createElement('canvas');

  container.style.position = 'relative';
  container.appendChild(canvas);

  canvas.width = window.innerWidth / 2;
  canvas.height = window.innerHeight / 2;
  document.body.appendChild(container);

  const gl = canvas.getContext('webgl2');

  // ISF //

  // args: fragment shader file, vertex shader file, GL context, canvas,
  // input for the shader's inputImage uniform, whether to draw to the canvas
  createRendering('dot.fs', 'dot.vs', gl, canvas, video, false);

  // wrap the canvas in a texture so the next pass can sample it
  const tex = new THREE.Texture(canvas);
  tex.needsUpdate = true;
  tex.flipY = false; // THREE.Texture has no flipX property

  createRendering('feedback.fs', undefined, gl, canvas, tex, true);
}



function createRendering(fsFilename, vsFilename, ctx, _canvas, _input, toggleDraw) {

  let fsSrc;
  const fsLoaded = (response) => {
    fsSrc = response;

    if (vsFilename) {
      loadFile(vsFilename, vsLoaded);
    } else {
      vsLoaded();
    }
  };

  const vsLoaded = (vsSrc) => {
    const renderer = new ISFRenderer(ctx);
    renderer.loadSource(fsSrc, vsSrc);

    const animate = () => {
      requestAnimationFrame(animate);

      // tapestryfract doesn't have inputImage, so we'd need to check:
      // if ('inputImage' in renderer.uniforms) {
      renderer.setValue('inputImage', _input);
      // }

      if (toggleDraw) {
        renderer.draw(_canvas);
      }
    };

    requestAnimationFrame(animate);
  };

  loadFile(fsFilename, fsLoaded);
}

Technically you can read the pixels back to the CPU with WebGLRenderer.readRenderTargetPixels() and upload them into a texture, but this approach is really resource-intensive and will decimate your framerate. See this discussion for more details on that.
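Just to illustrate why: here's a minimal sketch of that slow round-trip (the scene, camera, and postMaterial with an inputImage uniform are assumed placeholder names, not from your code):

const renderer = new THREE.WebGLRenderer();
const rt = new THREE.WebGLRenderTarget(512, 512);
const pixels = new Uint8Array(rt.width * rt.height * 4);

// Render the first pass into the render target
renderer.setRenderTarget(rt);
renderer.render(scene, camera);

// GPU -> CPU readback: this stalls until the GPU finishes the frame
renderer.readRenderTargetPixels(rt, 0, 0, rt.width, rt.height, pixels);

// CPU -> GPU upload: push the same pixels back up as a new texture
const readbackTex = new THREE.DataTexture(pixels, rt.width, rt.height);
readbackTex.needsUpdate = true;
postMaterial.uniforms.inputImage.value = readbackTex;

Doing that every frame forces a full GPU sync per pass, which is why the render-target approach below is preferred.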

The recommended approach is to render to a WebGLRenderTarget and then use the resulting WebGLRenderTarget.texture as the texture input for your next render pass. Here’s some quick pseudocode on how to do that:

const renderer = new THREE.WebGLRenderer();
const rt = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight);

function animate() {
	requestAnimationFrame(animate);

	// Set rt as the destination of the render
	renderer.setRenderTarget(rt);
	renderer.render(scene, camera);

	// Use result of rt as input for next pass
	postMaterial.uniforms.inputTexture.value = rt.texture;

	// Passing null sets the canvas as the destination
	renderer.setRenderTarget(null);
	renderer.render(postScene, postCamera);
}
  • You can keep chaining render targets one after the other as needed.
  • Make sure each pass uses its own scene/material and never samples the texture its current render target is writing to; otherwise WebGL aborts the draw to prevent an infinite feedback loop, which is exactly the warning you’re seeing. If a pass needs its own previous output, like your feedback.fs, ping-pong between two render targets, as sketched below.
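A minimal sketch of that ping-pong pattern (assuming a feedbackScene whose feedbackMaterial exposes an inputImage uniform, plus an outputScene/outputMaterial that displays the result; all of those names are placeholders):

const rtA = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight);
const rtB = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight);
let readTarget = rtA;   // holds last frame's result
let writeTarget = rtB;  // rendered into this frame

function animate() {
	requestAnimationFrame(animate);

	// Feedback pass: read from last frame's target, write into the other one
	feedbackMaterial.uniforms.inputImage.value = readTarget.texture;
	renderer.setRenderTarget(writeTarget);
	renderer.render(feedbackScene, camera);

	// Output pass: draw the fresh result to the canvas
	outputMaterial.uniforms.inputImage.value = writeTarget.texture;
	renderer.setRenderTarget(null);
	renderer.render(outputScene, camera);

	// Swap read/write so the next frame samples what we just rendered
	[readTarget, writeTarget] = [writeTarget, readTarget];
}
animate();

Because the texture being sampled and the target being written are never the same in any single draw call, the illegal-feedback warning goes away.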