WebCodecs Example
Examples for using predict_batch with the WebCodecs API.
Example 1: ReadableStream as Input
<p>Inference results from a direct video stream:</p>
<canvas id="outputCanvas"></canvas>
<script src="https://assets.degirum.com/degirumjs/0.1.5/degirum-js.min.obf.js"></script>
<script type="module">
// --- Model Setup ---
const dg = new dg_sdk();

// Reuse a previously cached token; only prompt the user when none is stored.
// NOTE: prompt() returns null when the user cancels — the original code
// unconditionally wrote that back, caching the literal string "null" and
// poisoning every subsequent page load. Only persist a non-empty token.
let secretToken = localStorage.getItem('secretToken');
if (!secretToken) {
  secretToken = prompt('Enter secret token:');
  if (secretToken) {
    localStorage.setItem('secretToken', secretToken);
  }
}

const MODEL_NAME = 'yolov8n_relu6_coco--640x640_quant_n2x_orca1_1';
const ZOO_IP = 'https://cs.degirum.com/degirum/public';

// Connect to the cloud model zoo and load the detection model.
const zoo = await dg.connect('cloud', ZOO_IP, secretToken);
const model = await zoo.loadModel(MODEL_NAME);

// 1. Get video stream from webcam
const mediaStream = await navigator.mediaDevices.getUserMedia({ video: true });
const videoTrack = mediaStream.getVideoTracks()[0];

// 2. Create a processor to get a readable stream of VideoFrame objects
const processor = new MediaStreamTrackProcessor({ track: videoTrack });
const readableStream = processor.readable;

// 3. Feed the stream to predict_batch and loop through results
for await (const result of model.predict_batch(readableStream)) {
  // Display the result on the canvas
  await model.displayResultToCanvas(result, 'outputCanvas');
  // IMPORTANT: Close the frame to release memory.
  // The SDK does not close frames when you provide a raw stream.
  result.imageFrame.close();
}
</script>
Example 2: Real-Time Inference with Display in a <video> Element
Example 3: Parallel Inference on Four Video Streams
Was this helpful?

