| 1 | //! Video decoding for video wallpapers |
| 2 | //! |
| 3 | //! Provides frame-by-frame video decoding using ffmpeg. |
| 4 | //! Requires the `video` feature and system ffmpeg libraries. |
| 5 | |
| 6 | #![cfg(feature = "video")] |
| 7 | |
| 8 | use anyhow::{Context, Result}; |
| 9 | use ffmpeg_next as ffmpeg; |
| 10 | use ffmpeg_next::format::{input, Pixel}; |
| 11 | use ffmpeg_next::media::Type; |
| 12 | use ffmpeg_next::software::scaling::{context::Context as ScalerContext, flag::Flags}; |
| 13 | use ffmpeg_next::util::frame::video::Video as VideoFrame; |
| 14 | use image::RgbaImage; |
| 15 | use std::path::Path; |
| 16 | use std::sync::Once; |
| 17 | use std::time::Duration; |
| 18 | |
| 19 | static FFMPEG_INIT: Once = Once::new(); |
| 20 | |
| 21 | /// Initialize ffmpeg (call once at startup) |
| 22 | pub fn init() -> Result<()> { |
| 23 | let mut init_result = Ok(()); |
| 24 | FFMPEG_INIT.call_once(|| { |
| 25 | if let Err(e) = ffmpeg::init() { |
| 26 | init_result = Err(anyhow::anyhow!("Failed to initialize ffmpeg: {}", e)); |
| 27 | } |
| 28 | }); |
| 29 | init_result |
| 30 | } |
| 31 | |
/// Static metadata about a video file, captured when the decoder is opened
#[derive(Debug, Clone)]
pub struct VideoInfo {
    /// Video width in pixels
    pub width: u32,
    /// Video height in pixels
    pub height: u32,
    /// Container duration in seconds (converted from `AV_TIME_BASE` units)
    pub duration: f64,
    /// Average frame rate (FPS); falls back to 30.0 when the stream does
    /// not report one
    pub frame_rate: f64,
    /// Estimated total frames (`duration * frame_rate`, truncated) — an
    /// estimate, not an exact count
    pub frame_count: usize,
    /// Video codec name (e.g. "h264"), or "unknown" if unavailable
    pub codec: String,
}
| 48 | |
/// A single decoded video frame, already converted to RGBA
pub struct DecodedFrame {
    /// Frame pixel data, scaled/converted to 8-bit RGBA
    pub image: RgbaImage,
    /// Presentation timestamp in seconds from the start of the video
    /// (0.0 when the frame carries no PTS)
    pub pts: f64,
    /// Zero-based index of this frame in decode order
    pub index: usize,
}
| 58 | |
/// Sequential video decoder that yields frames as RGBA images.
///
/// Wraps an ffmpeg demuxer + decoder + scaler pipeline. Obtain one via
/// [`VideoDecoder::open`] or [`VideoDecoder::open_bytes`], then pull frames
/// with [`VideoDecoder::next_frame`].
pub struct VideoDecoder {
    /// Demuxer (container) context that packets are read from
    input: ffmpeg::format::context::Input,
    /// Index of the selected video stream within the container
    stream_index: usize,
    /// Decoder for the selected video stream
    decoder: ffmpeg::decoder::Video,
    /// Scaler converting decoded frames to RGBA at the source resolution
    scaler: ScalerContext,
    /// Metadata captured at open time
    info: VideoInfo,
    /// Index assigned to the next decoded frame (starts at 0)
    frame_index: usize,
    /// Stream time base as seconds-per-tick, for PTS → seconds conversion
    time_base: f64,
}
| 76 | |
| 77 | impl VideoDecoder { |
| 78 | /// Open a video file for decoding |
| 79 | pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> { |
| 80 | init()?; |
| 81 | |
| 82 | let path = path.as_ref(); |
| 83 | let input = input(&path) |
| 84 | .with_context(|| format!("Failed to open video: {}", path.display()))?; |
| 85 | |
| 86 | Self::from_input(input) |
| 87 | } |
| 88 | |
| 89 | /// Open a video from bytes (writes to temp file) |
| 90 | pub fn open_bytes(data: &[u8]) -> Result<Self> { |
| 91 | init()?; |
| 92 | |
| 93 | // ffmpeg-next doesn't support reading from memory directly, |
| 94 | // so we write to a temp file |
| 95 | let temp_dir = std::env::temp_dir(); |
| 96 | let temp_path = temp_dir.join(format!("garbg_video_{}.mp4", std::process::id())); |
| 97 | std::fs::write(&temp_path, data) |
| 98 | .context("Failed to write video to temp file")?; |
| 99 | |
| 100 | let result = Self::open(&temp_path); |
| 101 | |
| 102 | // Clean up temp file |
| 103 | let _ = std::fs::remove_file(&temp_path); |
| 104 | |
| 105 | result |
| 106 | } |
| 107 | |
| 108 | /// Create decoder from an open input context |
| 109 | fn from_input(input: ffmpeg::format::context::Input) -> Result<Self> { |
| 110 | // Find the best video stream |
| 111 | let stream = input |
| 112 | .streams() |
| 113 | .best(Type::Video) |
| 114 | .context("No video stream found")?; |
| 115 | |
| 116 | let stream_index = stream.index(); |
| 117 | let time_base = stream.time_base(); |
| 118 | |
| 119 | // Get codec parameters |
| 120 | let codec_params = stream.parameters(); |
| 121 | let codec = ffmpeg::codec::context::Context::from_parameters(codec_params) |
| 122 | .context("Failed to create codec context")?; |
| 123 | |
| 124 | let decoder = codec.decoder().video() |
| 125 | .context("Failed to create video decoder")?; |
| 126 | |
| 127 | let width = decoder.width(); |
| 128 | let height = decoder.height(); |
| 129 | |
| 130 | // Create scaler to convert to RGBA |
| 131 | let scaler = ScalerContext::get( |
| 132 | decoder.format(), |
| 133 | width, |
| 134 | height, |
| 135 | Pixel::RGBA, |
| 136 | width, |
| 137 | height, |
| 138 | Flags::BILINEAR, |
| 139 | ).context("Failed to create video scaler")?; |
| 140 | |
| 141 | // Calculate video info |
| 142 | let duration = input.duration() as f64 / ffmpeg::ffi::AV_TIME_BASE as f64; |
| 143 | let frame_rate = stream.avg_frame_rate(); |
| 144 | let fps = if frame_rate.denominator() != 0 { |
| 145 | frame_rate.numerator() as f64 / frame_rate.denominator() as f64 |
| 146 | } else { |
| 147 | 30.0 // Default |
| 148 | }; |
| 149 | |
| 150 | let codec_name = decoder.codec() |
| 151 | .map(|c| c.name().to_string()) |
| 152 | .unwrap_or_else(|| "unknown".to_string()); |
| 153 | |
| 154 | let info = VideoInfo { |
| 155 | width, |
| 156 | height, |
| 157 | duration, |
| 158 | frame_rate: fps, |
| 159 | frame_count: (duration * fps) as usize, |
| 160 | codec: codec_name, |
| 161 | }; |
| 162 | |
| 163 | Ok(Self { |
| 164 | input, |
| 165 | stream_index, |
| 166 | decoder, |
| 167 | scaler, |
| 168 | info, |
| 169 | frame_index: 0, |
| 170 | time_base: time_base.numerator() as f64 / time_base.denominator() as f64, |
| 171 | }) |
| 172 | } |
| 173 | |
    /// Get metadata about the opened video (dimensions, duration, FPS, codec)
    pub fn info(&self) -> &VideoInfo {
        &self.info
    }
| 178 | |
| 179 | /// Get frame delay based on frame rate |
| 180 | pub fn frame_delay(&self) -> Duration { |
| 181 | if self.info.frame_rate > 0.0 { |
| 182 | Duration::from_secs_f64(1.0 / self.info.frame_rate) |
| 183 | } else { |
| 184 | Duration::from_millis(33) // ~30 FPS default |
| 185 | } |
| 186 | } |
| 187 | |
| 188 | /// Decode the next frame |
| 189 | pub fn next_frame(&mut self) -> Result<Option<DecodedFrame>> { |
| 190 | let mut decoded = VideoFrame::empty(); |
| 191 | |
| 192 | // Read packets until we get a frame |
| 193 | for (stream, packet) in self.input.packets() { |
| 194 | if stream.index() != self.stream_index { |
| 195 | continue; |
| 196 | } |
| 197 | |
| 198 | self.decoder.send_packet(&packet) |
| 199 | .context("Failed to send packet to decoder")?; |
| 200 | |
| 201 | while self.decoder.receive_frame(&mut decoded).is_ok() { |
| 202 | // Convert to RGBA |
| 203 | let mut rgb_frame = VideoFrame::empty(); |
| 204 | self.scaler.run(&decoded, &mut rgb_frame) |
| 205 | .context("Failed to scale frame")?; |
| 206 | |
| 207 | // Convert to RgbaImage |
| 208 | let image = frame_to_image(&rgb_frame)?; |
| 209 | let pts = decoded.pts().unwrap_or(0) as f64 * self.time_base; |
| 210 | |
| 211 | let frame = DecodedFrame { |
| 212 | image, |
| 213 | pts, |
| 214 | index: self.frame_index, |
| 215 | }; |
| 216 | |
| 217 | self.frame_index += 1; |
| 218 | return Ok(Some(frame)); |
| 219 | } |
| 220 | } |
| 221 | |
| 222 | // Flush the decoder |
| 223 | self.decoder.send_eof() |
| 224 | .context("Failed to flush decoder")?; |
| 225 | |
| 226 | while self.decoder.receive_frame(&mut decoded).is_ok() { |
| 227 | let mut rgb_frame = VideoFrame::empty(); |
| 228 | self.scaler.run(&decoded, &mut rgb_frame) |
| 229 | .context("Failed to scale frame")?; |
| 230 | |
| 231 | let image = frame_to_image(&rgb_frame)?; |
| 232 | let pts = decoded.pts().unwrap_or(0) as f64 * self.time_base; |
| 233 | |
| 234 | let frame = DecodedFrame { |
| 235 | image, |
| 236 | pts, |
| 237 | index: self.frame_index, |
| 238 | }; |
| 239 | |
| 240 | self.frame_index += 1; |
| 241 | return Ok(Some(frame)); |
| 242 | } |
| 243 | |
| 244 | Ok(None) |
| 245 | } |
| 246 | |
| 247 | /// Seek to a specific time (seconds) |
| 248 | pub fn seek(&mut self, time_secs: f64) -> Result<()> { |
| 249 | let timestamp = (time_secs / self.time_base) as i64; |
| 250 | self.input.seek(timestamp, ..) |
| 251 | .context("Failed to seek in video")?; |
| 252 | self.decoder.flush(); |
| 253 | Ok(()) |
| 254 | } |
| 255 | |
    /// Reset to the beginning of the video.
    ///
    /// Seeks back to time 0 and restarts frame numbering at 0, so the
    /// video can be decoded again (e.g. for looping playback).
    pub fn reset(&mut self) -> Result<()> {
        self.seek(0.0)?;
        self.frame_index = 0;
        Ok(())
    }
| 262 | |
    /// Index of the frame that the next `next_frame` call will return,
    /// i.e. how many frames have been decoded so far
    pub fn current_index(&self) -> usize {
        self.frame_index
    }
| 267 | } |
| 268 | |
| 269 | /// Convert an ffmpeg video frame to an image::RgbaImage |
| 270 | fn frame_to_image(frame: &VideoFrame) -> Result<RgbaImage> { |
| 271 | let width = frame.width(); |
| 272 | let height = frame.height(); |
| 273 | let data = frame.data(0); |
| 274 | let linesize = frame.stride(0); |
| 275 | |
| 276 | let mut pixels = Vec::with_capacity((width * height * 4) as usize); |
| 277 | |
| 278 | for y in 0..height { |
| 279 | let row_start = (y as usize) * linesize; |
| 280 | let row_end = row_start + (width as usize * 4); |
| 281 | pixels.extend_from_slice(&data[row_start..row_end]); |
| 282 | } |
| 283 | |
| 284 | RgbaImage::from_raw(width, height, pixels) |
| 285 | .context("Failed to create image from frame data") |
| 286 | } |
| 287 | |
/// Check if a file is a video (by extension)
pub fn is_video_file<P: AsRef<Path>>(path: P) -> bool {
    // Recognized video container extensions (compared case-insensitively).
    const VIDEO_EXTS: &[&str] = &["mp4", "webm", "mkv", "avi", "mov", "m4v"];

    path.as_ref()
        .extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| VIDEO_EXTS.contains(&ext.to_lowercase().as_str()))
        .unwrap_or(false)
}
| 298 | |
#[cfg(test)]
mod tests {
    use super::*;

    /// Extension-based detection accepts known video containers and
    /// rejects image formats.
    #[test]
    fn test_is_video_file() {
        let videos = ["test.mp4", "test.webm", "test.mkv"];
        let non_videos = ["test.png", "test.gif"];

        for name in videos.iter() {
            assert!(is_video_file(name), "{} should be a video", name);
        }
        for name in non_videos.iter() {
            assert!(!is_video_file(name), "{} should not be a video", name);
        }
    }
}
| 312 |