Canvas Video

A video can be played on a canvas, as shown in the example below.

Drawing the video onto a canvas automatically selects the current video frame and draws it, as shown below:

let videoToPlay = document.getElementById('videoToPlay');

...

ctx.drawImage(videoToPlay, 0, 0, width, height); // draw the currently playing frame

Example of a video being played on a canvas (Run example)

<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Worked example from lecture notes</title>
<style>
video
{
    width:300px;
}

#loadingMessage
{
    position:absolute;
    top:100px;
    left:100px;
    z-index:100;
    font-size:50px;
}
</style>

<script>
let canvas = null;
let ctx = null;
let videoToPlay;
let width = null;
let height = null;
let doubleBuffer = null;
let doubleBufferG = null;
let alphaImage = new Image();
alphaImage.src = 'images/four_leaf_clover.png';


window.onload = onAllAssetsLoaded;
document.write("<div id='loadingMessage'>Loading...</div>");
function onAllAssetsLoaded()
{
    // hide the webpage loading message
    document.getElementById('loadingMessage').style.visibility = "hidden";

    videoToPlay = document.getElementById('videoToPlay');
    canvas = document.getElementById('videoCanvas');
    ctx = canvas.getContext('2d');
    doubleBuffer = document.createElement('canvas');
    doubleBufferG = doubleBuffer.getContext('2d');
    width = videoToPlay.clientWidth;
    height = videoToPlay.clientHeight;
    canvas.width = width;
    canvas.height = height;
    doubleBuffer.width = width;
    doubleBuffer.height = height;

    // play the video 
    videoToPlay.play();

    renderCanvas();
}


let imageData = null;
let data = null;
function renderCanvas()
{
    requestAnimationFrame(renderCanvas);

    // draw a frame from the video onto the canvas
    if (videoToPlay.paused || videoToPlay.ended)
    {
        return false;
    }
    // draw the video frame into the double buffer
    doubleBufferG.drawImage(videoToPlay, 0, 0, width, height);

    // get the image data (i.e. the pixels) from the double buffer
    imageData = doubleBufferG.getImageData(0, 0, width, height);
    data = imageData.data;
    // NOTE: We can place code here to adjust the pixels (held 
    //       in 'data') before displaying the pixels on the canvas

    // Draw the imageData onto the canvas
    ctx.putImageData(imageData, 0, 0);
}
</script>
</head>

<body>
<video id='videoToPlay' loop style='visibility:hidden'>
<source src='images/video.mp4' type='video/mp4'>
</video>
<canvas id='videoCanvas'></canvas>
</body>
</html> 

Modify the code above to overlay text onto a canvas video.
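One possible approach (a minimal sketch, not the only solution): draw the text after the frame has been placed on the canvas, at the end of renderCanvas(). The font, colour and wording below are placeholder choices.

// at the end of renderCanvas(), after ctx.putImageData(imageData, 0, 0)
ctx.font = '30px Arial';
ctx.fillStyle = 'yellow';
ctx.fillText('Overlay text', 20, 40);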

Modify the code above to overlay text and an image onto a canvas video.
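A sketch of one approach, building on the previous exercise: load an additional overlay image at the top of the script (the file name 'images/logo.png' is a placeholder), then draw both the image and the text after the frame has been drawn.

// at the top of the script
let overlayImage = new Image();
overlayImage.src = 'images/logo.png'; // placeholder file name

// at the end of renderCanvas(), after ctx.putImageData(imageData, 0, 0)
ctx.drawImage(overlayImage, 10, 60, 80, 80);
ctx.font = '30px Arial';
ctx.fillStyle = 'yellow';
ctx.fillText('Overlay text', 20, 40);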

Modify the code above to place a global alpha on a canvas video.
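Note that globalAlpha affects drawImage() but not putImageData(). One minimal sketch is therefore to draw the double buffer itself onto the canvas instead of calling putImageData():

// replace ctx.putImageData(imageData, 0, 0) at the end of renderCanvas()
ctx.clearRect(0, 0, width, height);
ctx.globalAlpha = 0.5; // 50% opacity
ctx.drawImage(doubleBuffer, 0, 0, width, height);
ctx.globalAlpha = 1.0; // restore full opacity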

Canvas Video Alpha Compositions

Canvas video alpha filters are similar to canvas image alpha filters. Instead of writing an original image to the canvas, we write the current video frame to the canvas.

Drawing a video frame directly onto a canvas will always draw the entire frame, so the alpha overlay would be ignored. The current video frame first needs to be converted to an image before we write it to the canvas, as shown below:

doubleBufferG.drawImage(videoToPlay, 0, 0, width, height);
let dataURL = doubleBuffer.toDataURL();
let videoFrameImage = new Image();
videoFrameImage.src = dataURL;

We can then draw the 'videoFrameImage' using an alpha overlay, as shown below:

// 1) define the alpha area   
ctx.drawImage(alphaImage, 0, 0, width, height); 

// 2) select the alpha composite
ctx.globalCompositeOperation = 'source-in'; 
   
// 3) draw the video frame image
// only the part that overlaps the alpha area will be visible
ctx.drawImage(videoFrameImage, 0, 0, width, height);

Example of an alpha image filtered video (Run example).

<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Worked example from lecture notes</title>
<style>
video
{
    width:300px;
}

#loadingMessage
{
    position:absolute;
    top:100px;
    left:100px;
    z-index:100;
    font-size:50px;
}
</style>

<script>
let canvas = null;
let ctx = null;
let videoToPlay;
let width = null;
let height = null;
let doubleBuffer = null;
let doubleBufferG = null;
let alphaImage = new Image();
alphaImage.src = 'images/four_leaf_clover.png';


window.onload = onAllAssetsLoaded;
document.write("<div id='loadingMessage'>Loading...</div>");
function onAllAssetsLoaded()
{
    // hide the webpage loading message
    document.getElementById('loadingMessage').style.visibility = "hidden";

    videoToPlay = document.getElementById('videoToPlay');
    canvas = document.getElementById('videoCanvas');
    ctx = canvas.getContext('2d');
    doubleBuffer = document.createElement('canvas');
    doubleBufferG = doubleBuffer.getContext('2d');
    width = videoToPlay.clientWidth;
    height = videoToPlay.clientHeight;
    canvas.width = width;
    canvas.height = height;
    doubleBuffer.width = width;
    doubleBuffer.height = height;

    // play the video 
    videoToPlay.play();

    renderCanvas();
}


function renderCanvas()
{
    requestAnimationFrame(renderCanvas);

    if (videoToPlay.paused || videoToPlay.ended)
    {
        return false;
    }
    // convert the video frame into an image
    doubleBufferG.drawImage(videoToPlay, 0, 0, width, height);

    let dataURL = doubleBuffer.toDataURL();
    let videoFrameImage = new Image();

    // the image decodes from the data URL asynchronously,
    // so draw it once it has loaded
    videoFrameImage.onload = function ()
    {
        // reset the composite operation and clear the previous frame
        ctx.globalCompositeOperation = 'source-over';
        ctx.clearRect(0, 0, width, height);

        // 1) define the alpha area
        ctx.drawImage(alphaImage, 0, 0, width, height);

        // 2) select the alpha composite
        ctx.globalCompositeOperation = 'source-in';

        // 3) draw the video frame image
        // only the part that overlaps the alpha area will be visible
        ctx.drawImage(videoFrameImage, 0, 0, width, height);
    };
    videoFrameImage.src = dataURL;
}
</script>
</head>

<body>
<video id='videoToPlay' loop style='visibility:hidden'>
<source src='images/video.mp4' type='video/mp4'>
</video>
<canvas id='videoCanvas'></canvas>
</body>
</html> 

Modify the code above to display an alpha text overlaid video.
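A minimal sketch of one approach: inside videoFrameImage.onload, replace step 1 so that filled text, rather than alphaImage, defines the alpha area. The font and wording are placeholder choices.

// 1) define the alpha area using text
ctx.globalCompositeOperation = 'source-over';
ctx.clearRect(0, 0, width, height);
ctx.font = 'bold 80px Arial';
ctx.fillText('VIDEO', 10, 100);

// 2) select the alpha composite
ctx.globalCompositeOperation = 'source-in';

// 3) draw the video frame image
ctx.drawImage(videoFrameImage, 0, 0, width, height);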

Modify the code above to display an alpha region overlaid video.
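A minimal sketch of one approach: inside videoFrameImage.onload, replace step 1 so that a filled region (here, an arbitrarily sized circle) defines the alpha area.

// 1) define the alpha area using a filled region
ctx.globalCompositeOperation = 'source-over';
ctx.clearRect(0, 0, width, height);
ctx.beginPath();
ctx.arc(width / 2, height / 2, 100, 0, 2 * Math.PI);
ctx.fill();

// 2) select the alpha composite
ctx.globalCompositeOperation = 'source-in';

// 3) draw the video frame image
ctx.drawImage(videoFrameImage, 0, 0, width, height);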


Canvas Video Filters

Sepia

Example of a sepia video being played on a canvas (Run example)

<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Worked example from lecture notes</title>
<style>
video
{
    width:300px;
}

#loadingMessage
{
    position:absolute;
    top:100px;
    left:100px;
    z-index:100;
    font-size:50px;
}
</style>

<script>
let canvas = null;
let ctx = null;
let videoToPlay;
let width = null;
let height = null;
let doubleBuffer = null;
let doubleBufferG = null;
let alphaImage = new Image();
alphaImage.src = 'images/four_leaf_clover.png';


window.onload = onAllAssetsLoaded;
document.write("<div id='loadingMessage'>Loading...</div>");
function onAllAssetsLoaded()
{
    // hide the webpage loading message
    document.getElementById('loadingMessage').style.visibility = "hidden";

    videoToPlay = document.getElementById('videoToPlay');
    canvas = document.getElementById('videoCanvas');
    ctx = canvas.getContext('2d');
    doubleBuffer = document.createElement('canvas');
    doubleBufferG = doubleBuffer.getContext('2d');
    width = videoToPlay.clientWidth;
    height = videoToPlay.clientHeight;
    canvas.width = width;
    canvas.height = height;
    doubleBuffer.width = width;
    doubleBuffer.height = height;

    // play the video 
    videoToPlay.play();

    renderCanvas();
}


let imageData = null;
let data = null;
function renderCanvas()
{
    requestAnimationFrame(renderCanvas);

    if (videoToPlay.paused || videoToPlay.ended)
    {
        return false;
    }
    // draw the video frame into the double buffer
    doubleBufferG.drawImage(videoToPlay, 0, 0, width, height);

    // get the image data (i.e. the pixels) from the double buffer
    imageData = doubleBufferG.getImageData(0, 0, width, height);
    data = imageData.data;

    // Loop through the pixels, applying the sepia weightings
    for (let i = 0; i < data.length; i += 4)
    {
        let red = data[i];
        let green = data[i + 1];
        let blue = data[i + 2];

        data[i] = (red * 0.393) + (green * 0.769) + (blue * 0.189);
        data[i + 1] = (red * 0.349) + (green * 0.686) + (blue * 0.168);
        data[i + 2] = (red * 0.272) + (green * 0.534) + (blue * 0.131);
    }

    // Draw the imageData onto the canvas
    ctx.putImageData(imageData, 0, 0);
}
</script>
</head>

<body>
<video id='videoToPlay' loop style='visibility:hidden'>
<source src='images/video.mp4' type='video/mp4'>
</video>
<canvas id='videoCanvas'></canvas>
</body>
</html> 

Adjust the code above to produce a canvas video that is in greyscale (Example solution).
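A minimal sketch of one greyscale loop (this averages the three channels; weighted luminance values would also work):

// replace the sepia loop with a greyscale loop
for (let i = 0; i < data.length; i += 4)
{
    let brightness = (data[i] + data[i + 1] + data[i + 2]) / 3;
    data[i] = brightness;
    data[i + 1] = brightness;
    data[i + 2] = brightness;
}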

Adjust the code above to produce a canvas video that has its brightness adjusted (Example solution).
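A minimal sketch of one brightness loop (the adjustment value is arbitrary; out-of-range results are clamped to 0..255 automatically, because 'data' is a Uint8ClampedArray):

// replace the sepia loop with a brightness loop
let adjustment = 40; // positive brightens, negative darkens
for (let i = 0; i < data.length; i += 4)
{
    data[i] += adjustment;
    data[i + 1] += adjustment;
    data[i + 2] += adjustment;
}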

Invert

Example of an inverted video (Run example)

<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Worked example from lecture notes</title>
<style>
video
{
    width:300px;
}

#loadingMessage
{
    position:absolute;
    top:100px;
    left:100px;
    z-index:100;
    font-size:50px;
}
</style>

<script>
let canvas = null;
let ctx = null;
let videoToPlay;
let width = null;
let height = null;
let doubleBuffer = null;
let doubleBufferG = null;
let alphaImage = new Image();
alphaImage.src = 'images/four_leaf_clover.png';


window.onload = onAllAssetsLoaded;
document.write("<div id='loadingMessage'>Loading...</div>");
function onAllAssetsLoaded()
{
    // hide the webpage loading message
    document.getElementById('loadingMessage').style.visibility = "hidden";

    videoToPlay = document.getElementById('videoToPlay');
    canvas = document.getElementById('videoCanvas');
    ctx = canvas.getContext('2d');
    doubleBuffer = document.createElement('canvas');
    doubleBufferG = doubleBuffer.getContext('2d');
    width = videoToPlay.clientWidth;
    height = videoToPlay.clientHeight;
    canvas.width = width;
    canvas.height = height;
    doubleBuffer.width = width;
    doubleBuffer.height = height;

    // play the video 
    videoToPlay.play();

    renderCanvas();
}


let imageData = null;
let data = null;
function renderCanvas()
{
    requestAnimationFrame(renderCanvas);

    if (videoToPlay.paused || videoToPlay.ended)
    {
        return false;
    }
    // draw the video frame into the double buffer
    doubleBufferG.drawImage(videoToPlay, 0, 0, width, height);

    // get the image data (i.e. the pixels) from the double buffer
    imageData = doubleBufferG.getImageData(0, 0, width, height);
    data = imageData.data;

    // do the invert
    for (let i = 0; i < data.length; i += 4)
    {
        data[i + 0] = 255 - data[i + 0];
        data[i + 1] = 255 - data[i + 1];
        data[i + 2] = 255 - data[i + 2];
        data[i + 3] = 255;
    }

    // Draw the imageData onto the canvas
    ctx.putImageData(imageData, 0, 0);
}
</script>
</head>

<body>
<video id='videoToPlay' loop style='visibility:hidden'>
<source src='images/video.mp4' type='video/mp4'>
</video>
<canvas id='videoCanvas'></canvas>
</body>
</html> 

Modify the code above to produce a posterised video.
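A minimal sketch of one posterise loop (the number of levels is arbitrary):

// replace the invert loop with a posterise loop
let levels = 4; // colour levels per channel
let step = 255 / (levels - 1);
for (let i = 0; i < data.length; i += 4)
{
    data[i] = Math.round(data[i] / step) * step;
    data[i + 1] = Math.round(data[i + 1] / step) * step;
    data[i + 2] = Math.round(data[i + 2] / step) * step;
    data[i + 3] = 255;
}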

Modify the code above to produce a threshold video.
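A minimal sketch of one threshold loop (the threshold value is arbitrary):

// replace the invert loop with a threshold loop
let threshold = 128;
for (let i = 0; i < data.length; i += 4)
{
    let brightness = (data[i] + data[i + 1] + data[i + 2]) / 3;
    let value = (brightness >= threshold) ? 255 : 0;
    data[i] = value;
    data[i + 1] = value;
    data[i + 2] = value;
    data[i + 3] = 255;
}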

Canvas Video Convolutions

Example of an edge detection video being played on a canvas (Run example)

<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Worked example from lecture notes</title>
<style>
video
{
    width:300px;
}

#loadingMessage
{
    position:absolute;
    top:100px;
    left:100px;
    z-index:100;
    font-size:50px;
}
</style>

<script>
let canvas = null;
let ctx = null;
let videoToPlay;
let width = null;
let height = null;
let doubleBuffer = null;
let doubleBufferG = null;
let alphaImage = new Image();
alphaImage.src = 'images/four_leaf_clover.png';

// set up the convolution matrix
// Note that this is the same convolution code from the Image Processing notes
let embossConvolutionMatrix = [0,  0,  0,
                               0,  2, -1,
                               0, -1,  0];

let blurConvolutionMatrix = [1, 2, 1,
                             2, 4, 2,
                             1, 2, 1];

let sharpenConvolutionMatrix = [ 0, -2,  0,
                                -2, 11, -2,
                                 0, -2,  0];

let edgeDetectionConvolutionMatrix = [1,  1, 1,
                                      1, -7, 1,
                                      1,  1, 1];

let noConvolutionMatrix = [0, 0, 0,
                           0, 1, 0,
                           0, 0, 0];

// select any of the convolution matrices from above
let convolutionMatrix = edgeDetectionConvolutionMatrix;


window.onload = onAllAssetsLoaded;
document.write("<div id='loadingMessage'>Loading...</div>");
function onAllAssetsLoaded()
{
    // hide the webpage loading message
    document.getElementById('loadingMessage').style.visibility = "hidden";

    videoToPlay = document.getElementById('videoToPlay');
    canvas = document.getElementById('videoCanvas');
    ctx = canvas.getContext('2d');
    doubleBuffer = document.createElement('canvas');
    doubleBufferG = doubleBuffer.getContext('2d');
    width = videoToPlay.clientWidth;
    height = videoToPlay.clientHeight;
    canvas.width = width;
    canvas.height = height;
    doubleBuffer.width = width;
    doubleBuffer.height = height;

    // play the video 
    videoToPlay.play();

    renderCanvas();
}


let imageData = null;
let data = null;
function renderCanvas()
{
    requestAnimationFrame(renderCanvas);

    doubleBufferG.drawImage(videoToPlay, 0, 0, width, height);
    imageData = doubleBufferG.getImageData(0, 0, width, height);
    data = imageData.data;

    let convolutionAmount = 0;
    for (let j = 0; j < 9; j++)
    {
        convolutionAmount += convolutionMatrix[j];
    }

    let originalImageData = doubleBufferG.getImageData(0, 0, width, height);
    let originalData = originalImageData.data;


    let convolvedPixel = null;
    for (let i = 0; i < data.length; i += 4)
    {
        data[i + 3] = 255; // alpha

        // apply the convolution for each of red, green and blue
        for (let rgbOffset = 0; rgbOffset < 3; rgbOffset++)
        {
            // get the pixel and its eight surrounding pixel values from the original image
            let convolutionPixels = [originalData[i + rgbOffset - width * 4 - 4],
                originalData[i + rgbOffset - width * 4],
                originalData[i + rgbOffset - width * 4 + 4],
                originalData[i + rgbOffset - 4],
                originalData[i + rgbOffset],
                originalData[i + rgbOffset + 4],
                originalData[i + rgbOffset + width * 4 - 4],
                originalData[i + rgbOffset + width * 4],
                originalData[i + rgbOffset + width * 4 + 4]];

            // do the convolution
            convolvedPixel = 0;
            for (let j = 0; j < 9; j++)
            {
                convolvedPixel += convolutionPixels[j] * convolutionMatrix[j];
            }

            // place the convolved pixel in the double buffer		 
            if (convolutionMatrix == embossConvolutionMatrix) // embossed is treated differently
            {
                data[i + rgbOffset] = convolvedPixel + 127;
            }
            else
            {
                convolvedPixel /= convolutionAmount;
                data[i + rgbOffset] = convolvedPixel;
            }
        }
    }

    // Draw the imageData onto the canvas
    ctx.putImageData(imageData, 0, 0);
}
</script>
</head>

<body>
<video id='videoToPlay' loop style='visibility:hidden'>
<source src='images/video.mp4' type='video/mp4'>
</video>
<canvas id='videoCanvas'></canvas>
</body>
</html>    

Adjust the code above to produce a canvas video that is embossed (Example solution).
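The emboss matrix and its special-case handling (adding 127 to each convolved value) are already present in the code above, so the only change needed is to select the emboss matrix:

// select the emboss matrix instead of the edge detection matrix
let convolutionMatrix = embossConvolutionMatrix;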

<div align="center"><a href="../../versionC/index.html" title="DKIT Lecture notes homepage for Derek O&#39; Reilly, Dundalk Institute of Technology (DKIT), Dundalk, County Louth, Ireland. Copyright Derek O&#39; Reilly, DKIT." target="_parent" style='font-size:0;color:white;background-color:white'>&nbsp;</a></div>