/* Copyright (C) 2024 DorotaC
* SPDX-License-Identifier: MIT OR Apache-2.0
*/
/*! Real-life example: using multiple threads with the manual API.
*
* This example demonstrates drawing camera frames on the GPU with zero copies, using the API with manual buffer management.
*
* Compared to glium_2, this example has a more straightforward structure: buffers simply go where they are needed. This freedom, however, makes it possible to create deadlocks or resource leaks.
*/
use std::io;
use std::os::fd::AsFd;
use std::sync::mpsc;
use std::thread;
use std::time::Instant;
use glium::Surface;
use vidi::{Config, FourCC, StreamManual};
use vidi_examples::{LoopHandler, build_egl_window, import_dmabuf_glutin};
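/// Custom event used to wake up the winit event loop when a new frame is ready.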
#[derive(Debug, Clone, Copy)]
enum UserEvent {
WakeUp,
}
fn main() -> io::Result<()> {
// Setup the GL display stuff
let event_loop = winit::event_loop::EventLoop::<UserEvent>::with_user_event().build()
.map_err(io::Error::other)?;
let event_loop_proxy = event_loop.create_proxy();
let (_window, display, egl_display) = build_egl_window(&event_loop);
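// Channel carrying filled camera buffers from the capture thread to the UI thread.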
let (tx, rx) = mpsc::channel();
let cameras_list = vidi::actors::camera_list::spawn()?;
let cameras = cameras_list.cameras();
// TODO: don't crash on zero cameras, show info instead
let camera = cameras_list.create(&cameras[0].info.id)
.expect("No camera found")
.expect("Failed to get camera");
dbg!(camera.get_id());
// Allocate 4 buffers by default
let buffer_count = 4;
if let Ok(mut camera) = camera.acquire() {
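// Capture runs on its own thread, so waiting for frames never blocks the event loop.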
thread::spawn(move || {
let mut stream: StreamManual = camera.start_manual(
Config { fourcc: FourCC::new(b"YUYV"), width: 640, height: 480 },
buffer_count,
).unwrap();
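// start() begins streaming and hands back the first buffer to cycle.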
let mut buf = stream.start().unwrap();
// The .clone() is needed: without it, the buffer would be moved into
// cycle_buffer() and the `buf` name invalidated outside of the loop,
// leaving no buffer with which to finish() the capture.
while let Ok((b, meta)) = stream.cycle_buffer(buf.clone()) {
println!(
"Buffer seq: {}, timestamp: {}",
meta.sequence,
meta.timestamp,
);
// Hand the filled buffer to the UI thread before waking the event loop.
if tx.send(b.clone()).is_err() {
// The receiver is gone (the window was closed); stop cycling buffers.
buf = b;
break;
}
let _ = event_loop_proxy.send_event(UserEvent::WakeUp);
buf = b;
}
if let Err((e, _buf, _stream)) = stream.finish(buf) {
eprintln!("Error stopping stream: {:?}", e);
}
});
}
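// GPU converter turning the raw YUYV frames into RGBA, sized to match the stream config above.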
let shader = crispy::shaders::yuv::YuyvToRgba::new(
&display,
(640, 480),
).unwrap();
event_loop.run_app(&mut LoopHandler {
user_event: move |_event| {
let t0 = Instant::now();
// Wait for the capture thread to hand over a filled buffer.
let buf = rx.recv().unwrap();
let buf = buf.read_arc();
// t1 marks the end of the buffer wait; everything after it is UI work.
let t1 = Instant::now();
let mut target = display.draw();
let tex = import_dmabuf_glutin(
&display,
&egl_display,
buf.as_fd(),
// The buffer has two R8 bytes per pixel.
(640*2, 480),
// This re-interprets the texture from YUYV to R8.
// GPUs don't seem to like YUYV textures, and crispy shaders aren't written with anything but R8 in mind.
// When in doubt, check shader documentation.
crispy::Format::R8,
).unwrap();
target.clear_color(0.0, 0.0, 0.0, 1.0);
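// Run the YUYV-to-RGBA conversion on the GPU, drawing straight into the window surface.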
shader.convert(
&display,
tex.get_texture(),
&mut target,
crispy::shaders::yuv::ColorSpace::BT709,
crispy::shaders::yuv::Gamma::Identity,
).unwrap();
target.finish().unwrap();
println!(
"ms: {}\t (buffer) + {}\t (UI)",
t1.duration_since(t0).as_millis(),
t1.elapsed().as_millis()
);
}
})
.map_err(io::Error::other)
}