<!doctype html>
<html>
<body>
<canvas id="canvas" width="960" height="1080"></canvas>
<script type="module">
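// First test video: VP9, 640x360 at 30 fps. Muted so autoplay is allowed.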
const video = document.createElement('video');
video.src = './vc/teddy1_vp9_640x360_30fps.webm';
video.loop = true;
video.muted = true;
await video.play();
// Test with two videos. The original implementation had a bug where
// importing a second video broke synchronization: the mailbox was
// used before it was created.
const video2 = document.createElement('video');
video2.src = './vc/teddy2_vp9_320x180_15fps.webm';
video2.loop = true;
video2.muted = true;
await video2.play();
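// Set up WebGPU: request an adapter and device, then configure the canvas.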
const adapter = navigator.gpu && await navigator.gpu.requestAdapter();
if (!adapter) throw new Error('WebGPU is not available in this browser.');
const device = await adapter.requestDevice();
const canvas = document.getElementById('canvas');
const context = canvas.getContext('webgpu');
// Use the canvas format preferred on this platform.
const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: presentationFormat,
});
// TODO: Use getBindGroupLayout. We can't right now because SPIRV-Cross
// doesn't reflect external textures correctly.
const bindGroupLayout = device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.FRAGMENT,
sampler: {type: "filtering"}
},
{
binding: 1,
visibility: GPUShaderStage.FRAGMENT,
externalTexture: {}
},
{
binding: 2,
visibility: GPUShaderStage.FRAGMENT,
externalTexture: {}
}
],
});
const pipelineLayout = device.createPipelineLayout({bindGroupLayouts: [bindGroupLayout]});
const pipeline = device.createRenderPipeline({
layout: pipelineLayout,
vertex: {
module: device.createShaderModule({
code: `
struct VertexOutput {
@builtin(position) Position : vec4<f32>,
@location(0) fragUV : vec2<f32>,
}
@vertex
fn main(@builtin(vertex_index) idx : u32) -> VertexOutput {
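// Full-screen quad built from two triangles.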
var pos = array<vec3<f32>, 6>(
vec3<f32>( 1.0, 1.0, 0.0),
vec3<f32>( 1.0, -1.0, 0.0),
vec3<f32>(-1.0, -1.0, 0.0),
vec3<f32>( 1.0, 1.0, 0.0),
vec3<f32>(-1.0, -1.0, 0.0),
vec3<f32>(-1.0, 1.0, 0.0)
);
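// UV y runs from 0 to 2: the fragment shader maps [0, 1] to the first
// video and (1, 2] to the second, stacking them vertically.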
var uv = array<vec2<f32>, 6>(
vec2<f32>(1.0, 0.0),
vec2<f32>(1.0, 2.0),
vec2<f32>(0.0, 2.0),
vec2<f32>(1.0, 0.0),
vec2<f32>(0.0, 2.0),
vec2<f32>(0.0, 0.0)
);
var output : VertexOutput;
output.Position = vec4<f32>(pos[idx], 1.0);
output.fragUV = uv[idx];
return output;
}
`,
}),
entryPoint: 'main',
},
fragment: {
module: device.createShaderModule({
code: `
@binding(0) @group(0) var mySampler: sampler;
@binding(1) @group(0) var myTexture: texture_external;
@binding(2) @group(0) var myTexture2: texture_external;
@fragment
fn main(@location(0) fragUV : vec2<f32>) -> @location(0) vec4<f32> {
var result : vec4<f32>;
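// Top half of the canvas (fragUV.y in [0, 1]) samples the first video;
// the bottom half remaps fragUV.y into [0, 1] and samples the second.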
if (fragUV.y <= 1.0) {
result = textureSampleBaseClampToEdge(myTexture, mySampler, fragUV);
} else {
var uv : vec2<f32> = vec2<f32>(fragUV.x, fragUV.y - 1.0);
result = textureSampleBaseClampToEdge(myTexture2, mySampler, uv);
}
return result;
}
`,
}),
entryPoint: 'main',
targets: [{
format: presentationFormat,
}],
},
primitive: {
topology: 'triangle-list',
},
});
// createSampler() defaults to nearest filtering, so request linear explicitly.
const linearSampler = device.createSampler({magFilter: 'linear', minFilter: 'linear'});
const renderPassDescriptor = {
colorAttachments: [
{
view: undefined, // Assigned later
loadOp: 'clear',
clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
storeOp: 'store',
},
],
};
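// Render loop: imports fresh external textures from both videos each frame
// and draws them stacked vertically into the canvas.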
function frame() {
renderPassDescriptor.colorAttachments[0].view = context
.getCurrentTexture()
.createView();
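// Imported external textures are only guaranteed to stay valid for a short
// time after import, so they (and the bind group that references them)
// must be re-created every frame.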
const externalTextureDescriptor = {source: video};
const externalTextureDescriptor2 = {source: video2};
const externalTexture = device.importExternalTexture(externalTextureDescriptor);
const externalTexture2 = device.importExternalTexture(externalTextureDescriptor2);
const bindGroup = device.createBindGroup({
layout: bindGroupLayout,
entries: [
{
binding: 0,
resource: linearSampler,
},
{
binding: 1,
resource: externalTexture,
},
{
binding: 2,
resource: externalTexture2,
}
],
});
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
passEncoder.setPipeline(pipeline);
passEncoder.setBindGroup(0, bindGroup);
passEncoder.draw(6);
passEncoder.end();
device.queue.submit([commandEncoder.finish()]);
requestAnimationFrame(frame);
}
requestAnimationFrame(frame);
</script>
</body>
</html>