Virtual Background Mediapipe

AI automatic background removal using the MediaPipe Tasks Vision library

This example demonstrates using a video source with MediaPipe AI background removal and background image mixing.
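
The core of the example is the plugin's `virtual-background` component. Below is a minimal sketch of attaching it; the option names are taken from the full example further down, while the helper names and the exact semantics of `renderType: 1` are assumptions:

    // Minimal sketch, assuming: `pc` is the PlayCanvas global, `app` is a
    // running pc.AppBase, `video` is an HTMLVideoElement providing the feed,
    // and `targetMeshInstance` (hypothetical name) is the mesh instance the
    // mixed output should be rendered onto
    const entity = new pc.Entity();
    const comp = entity.addComponent('virtual-background', {
        source: video,                      // video element to segment
        meshInstance: targetMeshInstance,   // where the mixed output is rendered
        renderType: 1,                      // assumed: selects the mesh render mode
        bgImage: 'textures/virtualbg.jpg'   // background mixed behind the subject
    });
    app.root.addChild(entity);

The full example below wires this same call into a complete page, using a TV model's screen as the target mesh instance.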

    <div class="flex w-full h-auto my-auto">
        <canvas id="app"></canvas>
    </div>
    <script type="module">
    // Assumes the PlayCanvas engine is loaded and available as the global
    // `pc` (e.g. via a prior script tag or an ES module import)

    const canvas = document.getElementById('app');

    // glslang/twgsl are shader transpilers the engine uses for WebGPU
    // devices; they go unused for the 'webgl2' device requested here
    const gfxOptions = {
        deviceTypes: ['webgl2'],
        glslangUrl: '/plugins/playcanvas/js/glslang/glslang.js',
        twgslUrl: '/plugins/playcanvas/js/twgsl/twgsl.js'
    };

    const device = await pc.createGraphicsDevice(canvas, gfxOptions);
    const createOptions = new pc.AppOptions();
    createOptions.graphicsDevice = device;

    // Register the component systems and resource handlers the example needs
    createOptions.componentSystems = [
        pc.RenderComponentSystem,
        pc.CameraComponentSystem,
        pc.LightComponentSystem
    ];
    createOptions.resourceHandlers = [pc.TextureHandler, pc.ContainerHandler, pc.ScriptHandler];

    const app = new pc.AppBase(canvas);
    app.init(createOptions);

    // The plugin script and a TV model whose screen the video is rendered onto
    const assets = {
        'playcanvas-virtual-background': new pc.Asset('playcanvas-virtual-background', 'script', { url: '../../js/playcanvas-virtual-background-1.70.js' }),
        tv: new pc.Asset('tv', 'container', { url: '/plugins/playcanvas/assets/models/tv.glb' })
    };

    const assetListLoader = new pc.AssetListLoader(Object.values(assets), app.assets);
    assetListLoader.load(async () => {
      app.start();

      // Set the canvas to fill the window and automatically change resolution to be the same as the canvas size
      app.setCanvasFillMode(pc.FILLMODE_FILL_WINDOW);
      app.setCanvasResolution(pc.RESOLUTION_AUTO);

      // Ensure canvas is resized when window changes size
      const resize = () => app.resizeCanvas();
      window.addEventListener('resize', resize);
      app.on('destroy', () => {
          window.removeEventListener('resize', resize);
      });

      app.scene.ambientLight = new pc.Color(0.2, 0.2, 0.2);

      // Create an Entity with a camera component
      const camera = new pc.Entity();
      camera.addComponent('camera', {
          clearColor: new pc.Color(0.4, 0.45, 0.5)
      });
      camera.translate(0, 0, 15);

      // Create an Entity with a omni light
      const light = new pc.Entity();
      light.addComponent('light', {
          type: 'omni',
          color: new pc.Color(1, 1, 1),
          range: 30
      });
      light.translate(5, 5, 10);

      app.root.addChild(camera);
      app.root.addChild(light);

        
      // Create a hidden video element to use as the segmentation source
      const video = document.createElement('video');
      video.id = 'vid';
      video.loop = true;

      // Muted so that we can autoplay
      video.muted = true;
      video.autoplay = true;

      // Inline needed for iOS otherwise it plays at fullscreen
      video.playsInline = true;

      video.crossOrigin = 'anonymous';

      // Make sure that the video is in view on the page otherwise it doesn't
      // load on some browsers, especially mobile
      video.setAttribute(
          'style',
          'display: block; width: 1px; height: 1px; position: absolute; opacity: 0; z-index: -1000; top: 0px; pointer-events: none'
      );
 

 

      // Create an entity to render the tv mesh
      const entity = assets.tv.resource.instantiateRenderEntity();
      entity.setLocalEulerAngles(90, 0, -90);
      app.root.addChild(entity);

      // Attach the virtual-background component, feeding it the video element
      // and targeting the TV screen mesh instance; bgImage is the replacement
      // background mixed in behind the segmented subject
      const virtual = new pc.Entity();
      const comp = virtual.addComponent('virtual-background', {
          meshInstance: entity.render.meshInstances[1],
          source: video,
          renderType: 1,
          bgImage: '../../textures/virtualbg.jpg'
      });
      app.root.addChild(virtual);

      const canvasTexture = comp.canvasTexture;
      const displayCanvas = comp.displayCanvas;
      console.log('display canvas texture', canvasTexture, displayCanvas);

      video.src = 'https://videos.electroteque.org/mediapipe/head-pose-face-detection-female.mp4';

      video.load();
      video.play();
    });
    </script>
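
Judging from the example, the component also exposes its processed output directly: `comp.canvasTexture` is a texture containing the mixed frames, which could be applied to other materials, and `comp.displayCanvas` is the canvas element the output is drawn into.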