@@ -0,0 +1,597 @@ |
| 1 | +use anyhow::{Context, Result}; |
| 2 | +use sha2::{Digest, Sha256}; |
| 3 | +use std::path::{Path, PathBuf}; |
| 4 | +use std::sync::Arc; |
| 5 | +use tokio::sync::RwLock; |
| 6 | +use tracing::{debug, info, warn, error}; |
| 7 | + |
| 8 | +use crate::storage::{ |
| 9 | + chunk_store::{ChunkStore, StorageStats}, |
| 10 | + metadata_store::{MetadataStore, FileMetadata as FileMetaData}, |
| 11 | + file_chunker::{FileChunker, FileMetadata as ChunkerFileMetadata, ChunkInfo}, |
| 12 | +}; |
| 13 | + |
/// Storage capacity configuration and limits
///
/// Safety: Enforces storage limits to prevent disk exhaustion
#[derive(Debug, Clone)]
pub struct StorageConfig {
    /// Maximum storage capacity in bytes
    pub max_capacity: u64,

    /// Warning threshold (% of capacity)
    pub warning_threshold: f64,

    /// Critical threshold (% of capacity) - stop accepting new data
    pub critical_threshold: f64,

    /// Default chunk size for file splitting
    pub default_chunk_size: usize,

    /// Maximum file size to accept
    pub max_file_size: u64,

    /// Enable automatic garbage collection
    pub enable_gc: bool,

    /// Garbage collection interval in seconds
    pub gc_interval: u64,
}

impl Default for StorageConfig {
    /// Conservative defaults: 10 GiB capacity, 1 MiB chunks, 1 GiB maximum
    /// file size, hourly garbage collection.
    fn default() -> Self {
        const KIB: u64 = 1024;
        const MIB: u64 = KIB * KIB;
        const GIB: u64 = MIB * KIB;
        StorageConfig {
            max_capacity: 10 * GIB,
            warning_threshold: 0.8,
            critical_threshold: 0.95,
            default_chunk_size: MIB as usize,
            max_file_size: GIB,
            enable_gc: true,
            gc_interval: 60 * 60, // one hour, in seconds
        }
    }
}
| 54 | + |
/// Comprehensive storage capacity metrics
///
/// Transparency: Detailed capacity tracking for monitoring
///
/// All sizes are in bytes. A snapshot of this struct is handed out by
/// `StorageManager::get_capacity_info` and is refreshed after every
/// store/delete operation.
#[derive(Debug, Clone)]
pub struct CapacityInfo {
    /// Total configured capacity in bytes
    pub total_capacity: u64,

    /// Currently used space in bytes
    pub used_space: u64,

    /// Available space in bytes (capacity minus used, saturating at zero)
    pub available_space: u64,

    /// Usage percentage (0.0 to 1.0)
    pub usage_percentage: f64,

    /// Number of stored files
    pub file_count: u64,

    /// Number of stored chunks
    pub chunk_count: u64,

    /// Average chunk size in bytes (0 when no chunks are stored)
    pub avg_chunk_size: u64,

    /// Storage efficiency (deduplication ratio): logical bytes (sum of file
    /// sizes) divided by physical bytes used; 1.0 when the store is empty
    pub efficiency_ratio: f64,
}
| 84 | + |
/// Main storage manager coordinating all storage operations
///
/// Safety: Enforces capacity limits and coordinates atomic operations
/// Transparency: Comprehensive logging and metrics collection
/// Privacy: Handles encrypted storage and secure deletion
pub struct StorageManager {
    /// Chunk storage backend (rooted at `<base_path>/chunks`)
    chunk_store: Arc<ChunkStore>,

    /// Metadata storage backend (rooted at `<base_path>/metadata`)
    metadata_store: Arc<MetadataStore>,

    /// File chunking system (splits/reassembles payloads at
    /// `config.default_chunk_size`)
    file_chunker: Arc<FileChunker>,

    /// Storage configuration (capacity limits, thresholds, chunk size)
    config: StorageConfig,

    /// Base storage path under which the chunk and metadata directories live
    base_path: PathBuf,

    /// Capacity snapshot, refreshed after every store/delete; guarded by an
    /// async RwLock so concurrent readers do not block each other
    capacity_info: Arc<RwLock<CapacityInfo>>,
}
| 109 | + |
| 110 | +impl StorageManager { |
| 111 | + /// Create a new StorageManager with specified configuration |
| 112 | + /// |
| 113 | + /// Safety: Initializes all storage backends with security settings |
| 114 | + pub async fn new<P: AsRef<Path>>(base_path: P, config: StorageConfig) -> Result<Self> { |
| 115 | + let base_path = base_path.as_ref().to_path_buf(); |
| 116 | + info!("Initializing StorageManager at: {:?}", base_path); |
| 117 | + |
| 118 | + // Create subdirectories for different storage types |
| 119 | + let chunk_path = base_path.join("chunks"); |
| 120 | + let metadata_path = base_path.join("metadata"); |
| 121 | + |
| 122 | + std::fs::create_dir_all(&chunk_path) |
| 123 | + .context("Failed to create chunk storage directory")?; |
| 124 | + std::fs::create_dir_all(&metadata_path) |
| 125 | + .context("Failed to create metadata storage directory")?; |
| 126 | + |
| 127 | + // Initialize storage backends |
| 128 | + let chunk_store = Arc::new(ChunkStore::new(&chunk_path) |
| 129 | + .context("Failed to initialize chunk store")?); |
| 130 | + |
| 131 | + let metadata_store = Arc::new(MetadataStore::new(&metadata_path) |
| 132 | + .context("Failed to initialize metadata store")?); |
| 133 | + |
| 134 | + let file_chunker = Arc::new(FileChunker::new(Some(config.default_chunk_size))?); |
| 135 | + |
| 136 | + // Initialize capacity tracking |
| 137 | + let capacity_info = Arc::new(RwLock::new(CapacityInfo { |
| 138 | + total_capacity: config.max_capacity, |
| 139 | + used_space: 0, |
| 140 | + available_space: config.max_capacity, |
| 141 | + usage_percentage: 0.0, |
| 142 | + file_count: 0, |
| 143 | + chunk_count: 0, |
| 144 | + avg_chunk_size: 0, |
| 145 | + efficiency_ratio: 1.0, |
| 146 | + })); |
| 147 | + |
| 148 | + let manager = Self { |
| 149 | + chunk_store, |
| 150 | + metadata_store, |
| 151 | + file_chunker, |
| 152 | + config, |
| 153 | + base_path, |
| 154 | + capacity_info, |
| 155 | + }; |
| 156 | + |
| 157 | + // Update capacity information from existing storage |
| 158 | + manager.refresh_capacity_info().await?; |
| 159 | + |
| 160 | + info!("StorageManager initialized successfully"); |
| 161 | + Ok(manager) |
| 162 | + } |
| 163 | + |
| 164 | + /// Store a file with automatic chunking and deduplication |
| 165 | + /// |
| 166 | + /// Safety: Enforces capacity limits and validates file integrity |
| 167 | + /// Transparency: Logs all storage operations with file hashes |
| 168 | + pub async fn store_file(&self, file_id: &str, data: &[u8], filename: &str) -> Result<String> { |
| 169 | + info!("Storing file: {} ({} bytes) as {}", filename, data.len(), file_id); |
| 170 | + |
| 171 | + // Check capacity limits before storing |
| 172 | + self.check_capacity_limits(data.len() as u64).await?; |
| 173 | + |
| 174 | + // Check file size limit |
| 175 | + if data.len() as u64 > self.config.max_file_size { |
| 176 | + return Err(anyhow::anyhow!( |
| 177 | + "File size ({} bytes) exceeds maximum allowed size ({} bytes)", |
| 178 | + data.len(), |
| 179 | + self.config.max_file_size |
| 180 | + )); |
| 181 | + } |
| 182 | + |
| 183 | + // Chunk the file |
| 184 | + let metadata = self.file_chunker.chunk_bytes(data, file_id.to_string(), filename.to_string())?; |
| 185 | + let file_hash = metadata.file_hash.clone(); |
| 186 | + |
| 187 | + debug!("File chunked into {} pieces", metadata.chunks.len()); |
| 188 | + |
| 189 | + // Store all chunks with deduplication |
| 190 | + let mut chunk_ids = Vec::new(); |
| 191 | + let mut stored_chunks = 0; |
| 192 | + |
| 193 | + // We need to get the actual chunk data from the original data |
| 194 | + for chunk_info in &metadata.chunks { |
| 195 | + let start = chunk_info.offset as usize; |
| 196 | + let end = start + chunk_info.size as usize; |
| 197 | + let chunk_data = &data[start..end]; |
| 198 | + |
| 199 | + let chunk_hash = self.chunk_store.store_chunk(&chunk_info.chunk_id, chunk_data).await?; |
| 200 | + chunk_ids.push(chunk_info.chunk_id.clone()); |
| 201 | + stored_chunks += 1; |
| 202 | + |
| 203 | + debug!("Stored chunk {}/{}: {} (hash: {})", |
| 204 | + stored_chunks, metadata.chunks.len(), chunk_info.chunk_id, chunk_hash); |
| 205 | + } |
| 206 | + |
| 207 | + // Create file metadata for storage |
| 208 | + let storage_metadata = FileMetaData { |
| 209 | + name: filename.to_string(), |
| 210 | + size: data.len() as u64, |
| 211 | + mime_type: self.detect_mime_type(data, filename), |
| 212 | + file_hash: file_hash.clone(), |
| 213 | + chunk_ids, |
| 214 | + created_at: std::time::SystemTime::now() |
| 215 | + .duration_since(std::time::UNIX_EPOCH)? |
| 216 | + .as_secs(), |
| 217 | + modified_at: std::time::SystemTime::now() |
| 218 | + .duration_since(std::time::UNIX_EPOCH)? |
| 219 | + .as_secs(), |
| 220 | + permissions: 0o644, // Default read-write for owner, read for others |
| 221 | + checksum: String::new(), // Will be calculated by metadata store |
| 222 | + }; |
| 223 | + |
| 224 | + // Store metadata |
| 225 | + self.metadata_store.store_metadata(file_id, storage_metadata).await?; |
| 226 | + |
| 227 | + // Update capacity information |
| 228 | + self.refresh_capacity_info().await?; |
| 229 | + |
| 230 | + info!("Successfully stored file: {} with hash: {}", file_id, file_hash); |
| 231 | + Ok(file_hash) |
| 232 | + } |
| 233 | + |
| 234 | + /// Retrieve a complete file by reconstructing from chunks |
| 235 | + /// |
| 236 | + /// Safety: Verifies integrity of all chunks before reconstruction |
| 237 | + /// Transparency: Logs retrieval operations and verification steps |
| 238 | + pub async fn retrieve_file(&self, file_id: &str) -> Result<Option<Vec<u8>>> { |
| 239 | + debug!("Retrieving file: {}", file_id); |
| 240 | + |
| 241 | + // Get file metadata |
| 242 | + let metadata = match self.metadata_store.get_metadata(file_id).await? { |
| 243 | + Some(meta) => meta, |
| 244 | + None => { |
| 245 | + debug!("File metadata not found: {}", file_id); |
| 246 | + return Ok(None); |
| 247 | + } |
| 248 | + }; |
| 249 | + |
| 250 | + info!("Retrieving file: {} ({} bytes, {} chunks)", |
| 251 | + metadata.name, metadata.size, metadata.chunk_ids.len()); |
| 252 | + |
| 253 | + // Retrieve all chunks |
| 254 | + let mut chunk_data = Vec::new(); |
| 255 | + for chunk_id in &metadata.chunk_ids { |
| 256 | + match self.chunk_store.retrieve_chunk(chunk_id).await? { |
| 257 | + Some(data) => chunk_data.push(data), |
| 258 | + None => { |
| 259 | + error!("Missing chunk {} for file {}", chunk_id, file_id); |
| 260 | + return Err(anyhow::anyhow!( |
| 261 | + "File reconstruction failed: missing chunk {}", chunk_id |
| 262 | + )); |
| 263 | + } |
| 264 | + } |
| 265 | + } |
| 266 | + |
| 267 | + // Create chunker metadata for reconstruction |
| 268 | + let chunker_metadata = ChunkerFileMetadata { |
| 269 | + file_id: file_id.to_string(), |
| 270 | + filename: metadata.name.clone(), |
| 271 | + total_size: metadata.size, |
| 272 | + file_hash: metadata.file_hash.clone(), |
| 273 | + chunks: { |
| 274 | + let mut offset = 0u64; |
| 275 | + metadata.chunk_ids.iter().enumerate().map(|(i, chunk_id)| { |
| 276 | + // Calculate the actual chunk hash |
| 277 | + let mut hasher = Sha256::new(); |
| 278 | + hasher.update(&chunk_data[i]); |
| 279 | + let chunk_hash = hex::encode(hasher.finalize()); |
| 280 | + |
| 281 | + let chunk_info = ChunkInfo { |
| 282 | + chunk_id: chunk_id.clone(), |
| 283 | + hash: chunk_hash, |
| 284 | + size: chunk_data[i].len() as u64, |
| 285 | + index: i as u32, |
| 286 | + offset, |
| 287 | + }; |
| 288 | + |
| 289 | + offset += chunk_data[i].len() as u64; |
| 290 | + chunk_info |
| 291 | + }).collect() |
| 292 | + }, |
| 293 | + chunk_size: self.config.default_chunk_size, |
| 294 | + mime_type: metadata.mime_type.clone(), |
| 295 | + created_at: metadata.created_at, |
| 296 | + }; |
| 297 | + |
| 298 | + // Reconstruct file |
| 299 | + let reconstructed = self.file_chunker.reconstruct_file(&chunker_metadata, chunk_data)?; |
| 300 | + |
| 301 | + // Verify file integrity |
| 302 | + let mut hasher = Sha256::new(); |
| 303 | + hasher.update(&reconstructed); |
| 304 | + let computed_hash = hex::encode(hasher.finalize()); |
| 305 | + if computed_hash != metadata.file_hash { |
| 306 | + error!("File integrity verification failed for {}: hash mismatch", file_id); |
| 307 | + return Err(anyhow::anyhow!( |
| 308 | + "File integrity verification failed: hash mismatch" |
| 309 | + )); |
| 310 | + } |
| 311 | + |
| 312 | + info!("Successfully retrieved and verified file: {}", file_id); |
| 313 | + Ok(Some(reconstructed)) |
| 314 | + } |
| 315 | + |
| 316 | + /// Delete a file and its associated chunks (with reference counting) |
| 317 | + /// |
| 318 | + /// Safety: Uses atomic operations and reference counting |
| 319 | + /// Transparency: Comprehensive logging of deletion process |
| 320 | + pub async fn delete_file(&self, file_id: &str) -> Result<bool> { |
| 321 | + info!("Deleting file: {}", file_id); |
| 322 | + |
| 323 | + // Get file metadata first |
| 324 | + let metadata = match self.metadata_store.get_metadata(file_id).await? { |
| 325 | + Some(meta) => meta, |
| 326 | + None => { |
| 327 | + debug!("Cannot delete non-existent file: {}", file_id); |
| 328 | + return Ok(false); |
| 329 | + } |
| 330 | + }; |
| 331 | + |
| 332 | + // Delete all associated chunks (with reference counting) |
| 333 | + let mut deleted_chunks = 0; |
| 334 | + for chunk_id in &metadata.chunk_ids { |
| 335 | + if self.chunk_store.delete_chunk(chunk_id).await? { |
| 336 | + deleted_chunks += 1; |
| 337 | + debug!("Deleted chunk: {}", chunk_id); |
| 338 | + } else { |
| 339 | + debug!("Chunk {} still has references, not deleted", chunk_id); |
| 340 | + } |
| 341 | + } |
| 342 | + |
| 343 | + // Delete metadata |
| 344 | + let metadata_deleted = self.metadata_store.delete_metadata(file_id).await?; |
| 345 | + |
| 346 | + // Update capacity information |
| 347 | + self.refresh_capacity_info().await?; |
| 348 | + |
| 349 | + info!("Successfully deleted file: {} (deleted {} chunks)", |
| 350 | + file_id, deleted_chunks); |
| 351 | + Ok(metadata_deleted) |
| 352 | + } |
| 353 | + |
    /// List all stored files with metadata
    ///
    /// Transparency: Provides comprehensive file listing for audit
    ///
    /// Thin delegate to the metadata store. `limit` presumably caps the
    /// number of entries returned, with `None` meaning unbounded —
    /// NOTE(review): confirm against `MetadataStore::list_files`.
    pub async fn list_files(&self, limit: Option<usize>) -> Result<Vec<(String, FileMetaData)>> {
        self.metadata_store.list_files(limit).await
    }
| 360 | + |
    /// Check if a file exists
    ///
    /// Existence is judged by metadata presence only; chunk availability and
    /// integrity are not verified here.
    pub async fn file_exists(&self, file_id: &str) -> Result<bool> {
        self.metadata_store.file_exists(file_id).await
    }
| 365 | + |
| 366 | + /// Get current storage capacity information |
| 367 | + /// |
| 368 | + /// Transparency: Real-time capacity metrics for monitoring |
| 369 | + pub async fn get_capacity_info(&self) -> CapacityInfo { |
| 370 | + let info = self.capacity_info.read().await; |
| 371 | + info.clone() |
| 372 | + } |
| 373 | + |
| 374 | + /// Get detailed storage statistics |
| 375 | + /// |
| 376 | + /// Transparency: Comprehensive storage metrics |
| 377 | + pub async fn get_storage_stats(&self) -> Result<StorageStats> { |
| 378 | + Ok(self.chunk_store.get_stats().await) |
| 379 | + } |
| 380 | + |
| 381 | + /// Check if storage has enough capacity for new data |
| 382 | + /// |
| 383 | + /// Safety: Prevents storage exhaustion |
| 384 | + async fn check_capacity_limits(&self, required_space: u64) -> Result<()> { |
| 385 | + let info = self.capacity_info.read().await; |
| 386 | + |
| 387 | + // Check if we have enough space |
| 388 | + if info.available_space < required_space { |
| 389 | + return Err(anyhow::anyhow!( |
| 390 | + "Insufficient storage space: required {} bytes, available {} bytes", |
| 391 | + required_space, info.available_space |
| 392 | + )); |
| 393 | + } |
| 394 | + |
| 395 | + // Check if we're approaching critical threshold |
| 396 | + let projected_usage = (info.used_space + required_space) as f64 / info.total_capacity as f64; |
| 397 | + |
| 398 | + if projected_usage > self.config.critical_threshold { |
| 399 | + return Err(anyhow::anyhow!( |
| 400 | + "Storage usage would exceed critical threshold: {:.1}% > {:.1}%", |
| 401 | + projected_usage * 100.0, |
| 402 | + self.config.critical_threshold * 100.0 |
| 403 | + )); |
| 404 | + } |
| 405 | + |
| 406 | + // Warn if approaching warning threshold |
| 407 | + if projected_usage > self.config.warning_threshold { |
| 408 | + warn!("Storage usage approaching warning threshold: {:.1}%", |
| 409 | + projected_usage * 100.0); |
| 410 | + } |
| 411 | + |
| 412 | + Ok(()) |
| 413 | + } |
| 414 | + |
| 415 | + /// Refresh capacity information from storage backends |
| 416 | + /// |
| 417 | + /// Transparency: Accurate real-time capacity tracking |
| 418 | + async fn refresh_capacity_info(&self) -> Result<()> { |
| 419 | + let chunk_stats = self.chunk_store.get_stats().await; |
| 420 | + let files = self.metadata_store.list_files(None).await?; |
| 421 | + |
| 422 | + let used_space = chunk_stats.total_size; |
| 423 | + let available_space = self.config.max_capacity.saturating_sub(used_space); |
| 424 | + let usage_percentage = used_space as f64 / self.config.max_capacity as f64; |
| 425 | + |
| 426 | + // Calculate efficiency ratio (deduplication benefits) |
| 427 | + let logical_size: u64 = files.iter().map(|(_, meta)| meta.size).sum(); |
| 428 | + let efficiency_ratio = if used_space > 0 { |
| 429 | + logical_size as f64 / used_space as f64 |
| 430 | + } else { |
| 431 | + 1.0 |
| 432 | + }; |
| 433 | + |
| 434 | + let avg_chunk_size = if chunk_stats.total_chunks > 0 { |
| 435 | + chunk_stats.total_size / chunk_stats.total_chunks |
| 436 | + } else { |
| 437 | + 0 |
| 438 | + }; |
| 439 | + |
| 440 | + let mut info = self.capacity_info.write().await; |
| 441 | + *info = CapacityInfo { |
| 442 | + total_capacity: self.config.max_capacity, |
| 443 | + used_space, |
| 444 | + available_space, |
| 445 | + usage_percentage, |
| 446 | + file_count: files.len() as u64, |
| 447 | + chunk_count: chunk_stats.total_chunks, |
| 448 | + avg_chunk_size, |
| 449 | + efficiency_ratio, |
| 450 | + }; |
| 451 | + |
| 452 | + debug!("Capacity info updated: {:.1}% used ({} files, {} chunks)", |
| 453 | + usage_percentage * 100.0, info.file_count, info.chunk_count); |
| 454 | + |
| 455 | + Ok(()) |
| 456 | + } |
| 457 | + |
| 458 | + /// Simple MIME type detection based on file extension and content |
| 459 | + fn detect_mime_type(&self, data: &[u8], filename: &str) -> Option<String> { |
| 460 | + // Check magic bytes for common formats |
| 461 | + if data.len() >= 4 { |
| 462 | + match &data[0..4] { |
| 463 | + [0xFF, 0xD8, 0xFF, _] => return Some("image/jpeg".to_string()), |
| 464 | + [0x89, 0x50, 0x4E, 0x47] => return Some("image/png".to_string()), |
| 465 | + [0x47, 0x49, 0x46, _] => return Some("image/gif".to_string()), |
| 466 | + [0x25, 0x50, 0x44, 0x46] => return Some("application/pdf".to_string()), |
| 467 | + _ => {} |
| 468 | + } |
| 469 | + } |
| 470 | + |
| 471 | + // Fallback to extension-based detection |
| 472 | + if let Some(extension) = Path::new(filename).extension() { |
| 473 | + match extension.to_str()?.to_lowercase().as_str() { |
| 474 | + "txt" => Some("text/plain".to_string()), |
| 475 | + "json" => Some("application/json".to_string()), |
| 476 | + "xml" => Some("application/xml".to_string()), |
| 477 | + "html" => Some("text/html".to_string()), |
| 478 | + "css" => Some("text/css".to_string()), |
| 479 | + "js" => Some("application/javascript".to_string()), |
| 480 | + "mp4" => Some("video/mp4".to_string()), |
| 481 | + "mp3" => Some("audio/mpeg".to_string()), |
| 482 | + "zip" => Some("application/zip".to_string()), |
| 483 | + _ => None, |
| 484 | + } |
| 485 | + } else { |
| 486 | + None |
| 487 | + } |
| 488 | + } |
| 489 | +} |
| 490 | + |
| 491 | +#[cfg(test)] |
| 492 | +mod tests { |
| 493 | + use super::*; |
| 494 | + use tempfile::tempdir; |
| 495 | + |
| 496 | + #[tokio::test] |
| 497 | + async fn test_storage_manager_creation() { |
| 498 | + let temp_dir = tempdir().unwrap(); |
| 499 | + let config = StorageConfig::default(); |
| 500 | + |
| 501 | + let manager = StorageManager::new(temp_dir.path(), config).await.unwrap(); |
| 502 | + let capacity = manager.get_capacity_info().await; |
| 503 | + |
| 504 | + assert_eq!(capacity.used_space, 0); |
| 505 | + assert_eq!(capacity.file_count, 0); |
| 506 | + assert_eq!(capacity.chunk_count, 0); |
| 507 | + } |
| 508 | + |
| 509 | + #[tokio::test] |
| 510 | + async fn test_store_and_retrieve_file() { |
| 511 | + let temp_dir = tempdir().unwrap(); |
| 512 | + let config = StorageConfig::default(); |
| 513 | + let manager = StorageManager::new(temp_dir.path(), config).await.unwrap(); |
| 514 | + |
| 515 | + let file_id = "test-file-1"; |
| 516 | + let filename = "test.txt"; |
| 517 | + let data = b"Hello, ZephyrFS! This is a test file with some content."; |
| 518 | + |
| 519 | + // Store file |
| 520 | + let hash = manager.store_file(file_id, data, filename).await.unwrap(); |
| 521 | + assert!(!hash.is_empty()); |
| 522 | + |
| 523 | + // Verify existence |
| 524 | + assert!(manager.file_exists(file_id).await.unwrap()); |
| 525 | + |
| 526 | + // Retrieve file |
| 527 | + let retrieved = manager.retrieve_file(file_id).await.unwrap().unwrap(); |
| 528 | + assert_eq!(retrieved, data); |
| 529 | + |
| 530 | + // Check capacity info |
| 531 | + let capacity = manager.get_capacity_info().await; |
| 532 | + assert_eq!(capacity.file_count, 1); |
| 533 | + assert!(capacity.used_space > 0); |
| 534 | + assert!(capacity.usage_percentage > 0.0); |
| 535 | + } |
| 536 | + |
| 537 | + #[tokio::test] |
| 538 | + async fn test_capacity_limits() { |
| 539 | + let temp_dir = tempdir().unwrap(); |
| 540 | + let mut config = StorageConfig::default(); |
| 541 | + config.max_capacity = 100; // Very small capacity |
| 542 | + config.critical_threshold = 0.5; // 50% |
| 543 | + |
| 544 | + let manager = StorageManager::new(temp_dir.path(), config).await.unwrap(); |
| 545 | + |
| 546 | + let file_id = "large-file"; |
| 547 | + let filename = "large.txt"; |
| 548 | + let data = vec![0u8; 200]; // Larger than capacity |
| 549 | + |
| 550 | + // Should fail due to capacity limits |
| 551 | + let result = manager.store_file(file_id, &data, filename).await; |
| 552 | + assert!(result.is_err()); |
| 553 | + let error_msg = result.unwrap_err().to_string(); |
| 554 | + assert!(error_msg.contains("Insufficient storage space") || error_msg.contains("Storage usage would exceed")); |
| 555 | + } |
| 556 | + |
| 557 | + #[tokio::test] |
| 558 | + async fn test_file_deletion() { |
| 559 | + let temp_dir = tempdir().unwrap(); |
| 560 | + let config = StorageConfig::default(); |
| 561 | + let manager = StorageManager::new(temp_dir.path(), config).await.unwrap(); |
| 562 | + |
| 563 | + let file_id = "delete-test"; |
| 564 | + let filename = "delete.txt"; |
| 565 | + let data = b"File to be deleted"; |
| 566 | + |
| 567 | + // Store file |
| 568 | + manager.store_file(file_id, data, filename).await.unwrap(); |
| 569 | + assert!(manager.file_exists(file_id).await.unwrap()); |
| 570 | + |
| 571 | + // Delete file |
| 572 | + let deleted = manager.delete_file(file_id).await.unwrap(); |
| 573 | + assert!(deleted); |
| 574 | + assert!(!manager.file_exists(file_id).await.unwrap()); |
| 575 | + |
| 576 | + // Verify capacity updated |
| 577 | + let capacity = manager.get_capacity_info().await; |
| 578 | + assert_eq!(capacity.file_count, 0); |
| 579 | + } |
| 580 | + |
| 581 | + #[tokio::test] |
| 582 | + async fn test_mime_type_detection() { |
| 583 | + let temp_dir = tempdir().unwrap(); |
| 584 | + let config = StorageConfig::default(); |
| 585 | + let manager = StorageManager::new(temp_dir.path(), config).await.unwrap(); |
| 586 | + |
| 587 | + // Test JPEG magic bytes |
| 588 | + let jpeg_data = vec![0xFF, 0xD8, 0xFF, 0xE0, 0x00, 0x10]; |
| 589 | + let mime = manager.detect_mime_type(&jpeg_data, "test.jpg"); |
| 590 | + assert_eq!(mime, Some("image/jpeg".to_string())); |
| 591 | + |
| 592 | + // Test extension-based detection |
| 593 | + let text_data = b"plain text content"; |
| 594 | + let mime = manager.detect_mime_type(text_data, "test.txt"); |
| 595 | + assert_eq!(mime, Some("text/plain".to_string())); |
| 596 | + } |
| 597 | +} |