// NOTE(review): removed web-viewer artifact line ("Rust · 27882 bytes Raw Blame History") — it is not valid Rust source.
1 //! Transparency and audit logging system for ZephyrFS
2 //!
3 //! Provides comprehensive audit trails and transparency features while maintaining
4 //! zero-knowledge architecture and protecting user privacy.
5
6 use anyhow::{Context, Result};
7 use ring::digest::{Context as DigestContext, SHA256};
8 use serde::{Deserialize, Serialize};
9 use std::collections::HashMap;
10 use std::path::PathBuf;
11 use std::time::{SystemTime, UNIX_EPOCH};
12 use uuid::Uuid;
13
/// Audit logging configuration
///
/// Each event category can be toggled independently; the auditor checks the
/// matching flag before creating an entry. Retention/rotation settings are
/// intended for `AuditStorage` backends (see `AuditStorage::rotate_logs`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditConfig {
    /// Enable operation logging
    pub enable_operation_logging: bool,
    /// Enable access logging
    pub enable_access_logging: bool,
    /// Enable security event logging
    pub enable_security_logging: bool,
    /// Enable performance metrics logging
    pub enable_performance_logging: bool,
    /// Maximum log retention period (days)
    pub log_retention_days: u32,
    /// Log rotation size limit (bytes)
    /// NOTE(review): not referenced anywhere in this file — presumably read
    /// by storage backends; confirm before relying on it.
    pub log_rotation_size: u64,
    /// Enable log integrity verification (per-entry SHA-256 hashing)
    pub enable_log_integrity: bool,
}
32
33 impl Default for AuditConfig {
34 fn default() -> Self {
35 Self {
36 enable_operation_logging: true,
37 enable_access_logging: true,
38 enable_security_logging: true,
39 enable_performance_logging: true,
40 log_retention_days: 90,
41 log_rotation_size: 100 * 1024 * 1024, // 100MB
42 enable_log_integrity: true,
43 }
44 }
45 }
46
/// Types of audit events
///
/// Top-level tag carried by `AuditLogEntry`; each variant wraps a
/// category-specific event enum.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AuditEventType {
    /// Storage operations
    Storage(StorageEvent),
    /// Access control events
    Access(AccessEvent),
    /// Security-related events
    Security(SecurityEvent),
    /// Performance metrics
    Performance(PerformanceEvent),
    /// System health events
    SystemHealth(SystemHealthEvent),
}
61
/// Storage operation events
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StorageEvent {
    /// A chunk was persisted on `node_id`; `size` is its byte length.
    ChunkStored { chunk_id: Uuid, size: u64, node_id: String },
    /// A chunk was read from `node_id`.
    ChunkRetrieved { chunk_id: Uuid, node_id: String },
    /// A chunk was removed from `node_id`.
    ChunkDeleted { chunk_id: Uuid, node_id: String },
    /// A chunk copy was made from `source_node` to `target_node`.
    ChunkReplicated { chunk_id: Uuid, source_node: String, target_node: String },
    /// A chunk on `node_id` failed validation; `error_type` is free-form text.
    ChunkCorrupted { chunk_id: Uuid, node_id: String, error_type: String },
}
71
/// Access control events
///
/// `AccessDenied` and failed `AuthenticationAttempt`s are logged at Warning
/// severity by `log_access_event`; everything else at Info.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AccessEvent {
    /// A chunk was accessed; `access_type` is free-form (e.g. read/write).
    ChunkAccessed { chunk_id: Uuid, requester: String, access_type: String },
    /// An access request was rejected.
    AccessDenied { chunk_id: Uuid, requester: String, reason: String },
    /// An authentication attempt via `method`, successful or not.
    AuthenticationAttempt { user_id: String, success: bool, method: String },
    /// A permission was granted on a resource (not counted in metrics).
    PermissionGranted { user_id: String, permission: String, resource: String },
    /// A permission was revoked from a resource (not counted in metrics).
    PermissionRevoked { user_id: String, permission: String, resource: String },
}
81
/// Security-related events
///
/// `ThreatDetected.severity` is a free-form string; `log_security_event`
/// maps "critical"/"high"/"medium" to audit severities and treats anything
/// else as Info.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SecurityEvent {
    /// A threat was identified by scanning/monitoring.
    ThreatDetected { threat_type: String, severity: String, details: String },
    /// A chunk was isolated; `threat_level` scale is not defined here —
    /// presumably 0-255, confirm with the producer.
    ChunkQuarantined { chunk_id: Uuid, reason: String, threat_level: u8 },
    /// A resource failed an integrity check (logged at Error severity).
    IntegrityViolation { resource: String, violation_type: String },
    /// An encryption key was rotated.
    EncryptionKeyRotation { key_id: String, rotation_reason: String },
    /// A configured policy was violated (logged at Warning severity).
    SecurityPolicyViolation { policy: String, violation: String },
}
91
/// Performance metrics events
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PerformanceEvent {
    /// Duration of a single named operation.
    OperationTiming { operation: String, duration_ms: u64, success: bool },
    /// Measured throughput for a named operation.
    ThroughputMeasurement { operation: String, bytes_per_second: u64 },
    /// Point-in-time resource usage sample.
    ResourceUtilization { cpu_percent: f32, memory_bytes: u64, disk_bytes: u64 },
    /// Measured latency to `target`.
    NetworkLatency { target: String, latency_ms: u64 },
}
100
/// System health events
///
/// Always logged (not gated by any config flag — see
/// `log_system_health_event`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SystemHealthEvent {
    /// A node joined the network.
    NodeJoined { node_id: String, node_type: String },
    /// A node left the network; `reason` is free-form.
    NodeLeft { node_id: String, reason: String },
    /// A connectivity split was detected (logged at Error severity).
    NetworkPartition { affected_nodes: Vec<String> },
    /// A storage limit was crossed (logged at Warning severity).
    StorageThresholdReached { threshold_type: String, current_value: f64, limit: f64 },
    /// Maintenance announcement; `scheduled_time` is presumably Unix-epoch
    /// seconds — confirm with the producer.
    SystemMaintenanceScheduled { maintenance_type: String, scheduled_time: u64 },
}
110
/// Complete audit log entry
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditLogEntry {
    /// Unique entry identifier
    pub entry_id: Uuid,
    /// Timestamp when event occurred (seconds since the Unix epoch)
    pub timestamp: u64,
    /// Type of audit event
    pub event_type: AuditEventType,
    /// Node that generated this event (the auditor's `node_id`)
    pub source_node: String,
    /// Event severity level
    pub severity: AuditSeverity,
    /// Additional contextual metadata
    pub metadata: HashMap<String, String>,
    /// Cryptographic hash for integrity: hex-encoded SHA-256 over entry id,
    /// timestamp, source node and the JSON-serialized event (see
    /// `compute_entry_hash`); `None` when integrity hashing is disabled.
    pub integrity_hash: Option<String>,
}
129
130 /// Severity levels for audit events
131 #[derive(Debug, Clone, Serialize, Deserialize)]
132 pub enum AuditSeverity {
133 Info,
134 Warning,
135 Error,
136 Critical,
137 }
138
/// Audit log query parameters
///
/// All filters are optional; `None` means "do not filter on this field".
/// How many filters a backend honors is implementation-defined (the
/// in-memory test backend applies only `time_range` and `limit`).
#[derive(Debug, Clone)]
pub struct AuditQuery {
    /// Filter by time range (start, end) in Unix-epoch seconds
    pub time_range: Option<(u64, u64)>,
    /// Filter by event types
    pub event_types: Option<Vec<String>>,
    /// Filter by severity
    pub severity: Option<AuditSeverity>,
    /// Filter by source node
    pub source_node: Option<String>,
    /// Maximum number of results
    pub limit: Option<usize>,
    /// Include integrity verification (recomputes each returned entry's hash)
    pub verify_integrity: bool,
}
155
/// Audit query results
#[derive(Debug, Clone)]
pub struct AuditQueryResult {
    /// Matching log entries
    pub entries: Vec<AuditLogEntry>,
    /// Total number of matching entries.
    /// NOTE(review): `query_logs` sets this to `entries.len()` AFTER the
    /// backend applied `limit`, so it reflects the returned page, not the
    /// total match count.
    pub total_count: usize,
    /// Query execution time (milliseconds)
    pub execution_time_ms: u64,
    /// Integrity verification results
    pub integrity_status: IntegrityStatus,
}
168
/// Integrity verification status for audit logs
#[derive(Debug, Clone)]
pub enum IntegrityStatus {
    /// Every checked entry's recomputed hash matched its stored hash.
    Verified,
    /// At least one mismatch; ids of the offending entries are included.
    Compromised { corrupted_entries: Vec<Uuid> },
    /// Verification was skipped (not requested, or disabled in config).
    UnknownNotChecked,
}
176
/// Transparency reporting metrics
///
/// Aggregated from the audit log entries that fall inside the reporting
/// window (see `generate_transparency_report`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransparencyReport {
    /// Reporting period start (Unix-epoch seconds)
    pub period_start: u64,
    /// Reporting period end (Unix-epoch seconds)
    pub period_end: u64,
    /// Total storage operations
    pub storage_operations: StorageMetrics,
    /// Access control statistics
    pub access_statistics: AccessMetrics,
    /// Security events summary
    pub security_summary: SecurityMetrics,
    /// Performance overview
    pub performance_overview: PerformanceMetrics,
    /// Node participation metrics
    pub node_metrics: NodeMetrics,
}
195
196 /// Storage operation metrics
197 #[derive(Debug, Clone, Serialize, Deserialize)]
198 pub struct StorageMetrics {
199 pub total_chunks_stored: u64,
200 pub total_chunks_retrieved: u64,
201 pub total_chunks_deleted: u64,
202 pub total_bytes_stored: u64,
203 pub replication_events: u64,
204 pub corruption_events: u64,
205 }
206
207 /// Access control metrics
208 #[derive(Debug, Clone, Serialize, Deserialize)]
209 pub struct AccessMetrics {
210 pub total_access_attempts: u64,
211 pub successful_accesses: u64,
212 pub denied_accesses: u64,
213 pub authentication_attempts: u64,
214 pub successful_authentications: u64,
215 }
216
217 /// Security event metrics
218 #[derive(Debug, Clone, Serialize, Deserialize)]
219 pub struct SecurityMetrics {
220 pub threats_detected: u64,
221 pub chunks_quarantined: u64,
222 pub integrity_violations: u64,
223 pub policy_violations: u64,
224 pub key_rotations: u64,
225 }
226
227 /// Performance metrics summary
228 #[derive(Debug, Clone, Serialize, Deserialize)]
229 pub struct PerformanceMetrics {
230 pub average_operation_time_ms: f64,
231 pub peak_throughput_bps: u64,
232 pub average_cpu_usage: f32,
233 pub peak_memory_usage: u64,
234 pub network_latency_p95_ms: u64,
235 }
236
237 /// Node participation metrics
238 #[derive(Debug, Clone, Serialize, Deserialize)]
239 pub struct NodeMetrics {
240 pub active_nodes: u64,
241 pub nodes_joined: u64,
242 pub nodes_left: u64,
243 pub network_partitions: u64,
244 pub storage_threshold_breaches: u64,
245 }
246
/// Transparent audit logging system
///
/// Stateless front-end that turns typed events into `AuditLogEntry` records
/// (stamped with this node's id and, optionally, an integrity hash) and
/// hands them to a pluggable `AuditStorage` backend.
pub struct TransparentAuditor {
    // Feature toggles plus retention/integrity settings.
    config: AuditConfig,
    // Identifier stamped on every entry and mixed into integrity hashes.
    node_id: String,
    // Persistence backend; the auditor never stores entries itself.
    log_storage: Box<dyn AuditStorage>,
}
253
254 impl TransparentAuditor {
    /// Create new transparent auditor
    ///
    /// `node_id` is stamped on every entry this auditor produces (and mixed
    /// into each entry's integrity hash); `log_storage` is the backend that
    /// persists entries.
    pub fn new(config: AuditConfig, node_id: String, log_storage: Box<dyn AuditStorage>) -> Self {
        Self {
            config,
            node_id,
            log_storage,
        }
    }
263
264 /// Log a storage operation event
265 pub async fn log_storage_event(&self, event: StorageEvent) -> Result<()> {
266 if !self.config.enable_operation_logging {
267 return Ok(());
268 }
269
270 let entry = self.create_audit_entry(
271 AuditEventType::Storage(event),
272 AuditSeverity::Info,
273 HashMap::new(),
274 )?;
275
276 self.log_storage.store_entry(entry).await?;
277 Ok(())
278 }
279
280 /// Log an access control event
281 pub async fn log_access_event(&self, event: AccessEvent) -> Result<()> {
282 if !self.config.enable_access_logging {
283 return Ok(());
284 }
285
286 let severity = match &event {
287 AccessEvent::AccessDenied { .. } => AuditSeverity::Warning,
288 AccessEvent::AuthenticationAttempt { success: false, .. } => AuditSeverity::Warning,
289 _ => AuditSeverity::Info,
290 };
291
292 let entry = self.create_audit_entry(
293 AuditEventType::Access(event),
294 severity,
295 HashMap::new(),
296 )?;
297
298 self.log_storage.store_entry(entry).await?;
299 Ok(())
300 }
301
302 /// Log a security event
303 pub async fn log_security_event(&self, event: SecurityEvent) -> Result<()> {
304 if !self.config.enable_security_logging {
305 return Ok(());
306 }
307
308 let severity = match &event {
309 SecurityEvent::ThreatDetected { severity, .. } => match severity.as_str() {
310 "critical" => AuditSeverity::Critical,
311 "high" => AuditSeverity::Error,
312 "medium" => AuditSeverity::Warning,
313 _ => AuditSeverity::Info,
314 },
315 SecurityEvent::IntegrityViolation { .. } => AuditSeverity::Error,
316 SecurityEvent::SecurityPolicyViolation { .. } => AuditSeverity::Warning,
317 _ => AuditSeverity::Info,
318 };
319
320 let entry = self.create_audit_entry(
321 AuditEventType::Security(event),
322 severity,
323 HashMap::new(),
324 )?;
325
326 self.log_storage.store_entry(entry).await?;
327 Ok(())
328 }
329
330 /// Log a performance event
331 pub async fn log_performance_event(&self, event: PerformanceEvent) -> Result<()> {
332 if !self.config.enable_performance_logging {
333 return Ok(());
334 }
335
336 let entry = self.create_audit_entry(
337 AuditEventType::Performance(event),
338 AuditSeverity::Info,
339 HashMap::new(),
340 )?;
341
342 self.log_storage.store_entry(entry).await?;
343 Ok(())
344 }
345
346 /// Log a system health event
347 pub async fn log_system_health_event(&self, event: SystemHealthEvent) -> Result<()> {
348 let severity = match &event {
349 SystemHealthEvent::NetworkPartition { .. } => AuditSeverity::Error,
350 SystemHealthEvent::StorageThresholdReached { .. } => AuditSeverity::Warning,
351 _ => AuditSeverity::Info,
352 };
353
354 let entry = self.create_audit_entry(
355 AuditEventType::SystemHealth(event),
356 severity,
357 HashMap::new(),
358 )?;
359
360 self.log_storage.store_entry(entry).await?;
361 Ok(())
362 }
363
364 /// Query audit logs with filtering
365 pub async fn query_logs(&self, query: AuditQuery) -> Result<AuditQueryResult> {
366 let start_time = SystemTime::now();
367
368 let entries = self.log_storage.query_entries(query.clone()).await?;
369
370 let execution_time = SystemTime::now()
371 .duration_since(start_time)
372 .unwrap_or_default()
373 .as_millis() as u64;
374
375 let integrity_status = if query.verify_integrity {
376 self.verify_log_integrity(&entries).await?
377 } else {
378 IntegrityStatus::UnknownNotChecked
379 };
380
381 Ok(AuditQueryResult {
382 total_count: entries.len(),
383 entries,
384 execution_time_ms: execution_time,
385 integrity_status,
386 })
387 }
388
389 /// Generate transparency report for a time period
390 pub async fn generate_transparency_report(
391 &self,
392 period_start: u64,
393 period_end: u64,
394 ) -> Result<TransparencyReport> {
395 let query = AuditQuery {
396 time_range: Some((period_start, period_end)),
397 event_types: None,
398 severity: None,
399 source_node: None,
400 limit: None,
401 verify_integrity: false,
402 };
403
404 let query_result = self.query_logs(query).await?;
405 let entries = query_result.entries;
406
407 // Aggregate metrics from log entries
408 let storage_operations = self.aggregate_storage_metrics(&entries);
409 let access_statistics = self.aggregate_access_metrics(&entries);
410 let security_summary = self.aggregate_security_metrics(&entries);
411 let performance_overview = self.aggregate_performance_metrics(&entries);
412 let node_metrics = self.aggregate_node_metrics(&entries);
413
414 Ok(TransparencyReport {
415 period_start,
416 period_end,
417 storage_operations,
418 access_statistics,
419 security_summary,
420 performance_overview,
421 node_metrics,
422 })
423 }
424
425 /// Create audit log entry with proper formatting
426 fn create_audit_entry(
427 &self,
428 event_type: AuditEventType,
429 severity: AuditSeverity,
430 metadata: HashMap<String, String>,
431 ) -> Result<AuditLogEntry> {
432 let entry_id = Uuid::new_v4();
433 let timestamp = SystemTime::now()
434 .duration_since(UNIX_EPOCH)
435 .context("Failed to get timestamp")?
436 .as_secs();
437
438 let integrity_hash = if self.config.enable_log_integrity {
439 Some(self.compute_entry_hash(&entry_id, timestamp, &event_type)?)
440 } else {
441 None
442 };
443
444 Ok(AuditLogEntry {
445 entry_id,
446 timestamp,
447 event_type,
448 source_node: self.node_id.clone(),
449 severity,
450 metadata,
451 integrity_hash,
452 })
453 }
454
    /// Compute cryptographic hash for log entry integrity
    ///
    /// Hex-encoded SHA-256 over, in order: the entry's UUID bytes, the
    /// little-endian timestamp, this node's id, and the JSON-serialized
    /// event. The concatenation order IS the integrity contract —
    /// `verify_log_integrity` recomputes it identically, so any change here
    /// invalidates previously stored hashes.
    fn compute_entry_hash(
        &self,
        entry_id: &Uuid,
        timestamp: u64,
        event_type: &AuditEventType,
    ) -> Result<String> {
        let mut context = DigestContext::new(&SHA256);

        // Hash entry components
        context.update(entry_id.as_bytes());
        context.update(&timestamp.to_le_bytes());
        context.update(self.node_id.as_bytes());

        // Hash event type (serialized).
        // NOTE(review): this assumes serde_json emits a stable byte sequence
        // for the same event across library versions — a formatting change
        // would flag old entries as corrupted. Worth confirming/pinning.
        let event_bytes = serde_json::to_vec(event_type)
            .context("Failed to serialize event type")?;
        context.update(&event_bytes);

        let hash = context.finish();
        Ok(hex::encode(hash.as_ref()))
    }
477
478 /// Verify integrity of log entries
479 async fn verify_log_integrity(&self, entries: &[AuditLogEntry]) -> Result<IntegrityStatus> {
480 if !self.config.enable_log_integrity {
481 return Ok(IntegrityStatus::UnknownNotChecked);
482 }
483
484 let mut corrupted_entries = Vec::new();
485
486 for entry in entries {
487 if let Some(stored_hash) = &entry.integrity_hash {
488 let computed_hash = self.compute_entry_hash(
489 &entry.entry_id,
490 entry.timestamp,
491 &entry.event_type,
492 )?;
493
494 if stored_hash != &computed_hash {
495 corrupted_entries.push(entry.entry_id);
496 }
497 }
498 }
499
500 if corrupted_entries.is_empty() {
501 Ok(IntegrityStatus::Verified)
502 } else {
503 Ok(IntegrityStatus::Compromised { corrupted_entries })
504 }
505 }
506
507 /// Aggregate storage metrics from log entries
508 fn aggregate_storage_metrics(&self, entries: &[AuditLogEntry]) -> StorageMetrics {
509 let mut metrics = StorageMetrics {
510 total_chunks_stored: 0,
511 total_chunks_retrieved: 0,
512 total_chunks_deleted: 0,
513 total_bytes_stored: 0,
514 replication_events: 0,
515 corruption_events: 0,
516 };
517
518 for entry in entries {
519 if let AuditEventType::Storage(storage_event) = &entry.event_type {
520 match storage_event {
521 StorageEvent::ChunkStored { size, .. } => {
522 metrics.total_chunks_stored += 1;
523 metrics.total_bytes_stored += size;
524 }
525 StorageEvent::ChunkRetrieved { .. } => {
526 metrics.total_chunks_retrieved += 1;
527 }
528 StorageEvent::ChunkDeleted { .. } => {
529 metrics.total_chunks_deleted += 1;
530 }
531 StorageEvent::ChunkReplicated { .. } => {
532 metrics.replication_events += 1;
533 }
534 StorageEvent::ChunkCorrupted { .. } => {
535 metrics.corruption_events += 1;
536 }
537 }
538 }
539 }
540
541 metrics
542 }
543
544 /// Aggregate access metrics from log entries
545 fn aggregate_access_metrics(&self, entries: &[AuditLogEntry]) -> AccessMetrics {
546 let mut metrics = AccessMetrics {
547 total_access_attempts: 0,
548 successful_accesses: 0,
549 denied_accesses: 0,
550 authentication_attempts: 0,
551 successful_authentications: 0,
552 };
553
554 for entry in entries {
555 if let AuditEventType::Access(access_event) = &entry.event_type {
556 match access_event {
557 AccessEvent::ChunkAccessed { .. } => {
558 metrics.total_access_attempts += 1;
559 metrics.successful_accesses += 1;
560 }
561 AccessEvent::AccessDenied { .. } => {
562 metrics.total_access_attempts += 1;
563 metrics.denied_accesses += 1;
564 }
565 AccessEvent::AuthenticationAttempt { success, .. } => {
566 metrics.authentication_attempts += 1;
567 if *success {
568 metrics.successful_authentications += 1;
569 }
570 }
571 _ => {}
572 }
573 }
574 }
575
576 metrics
577 }
578
579 /// Aggregate security metrics from log entries
580 fn aggregate_security_metrics(&self, entries: &[AuditLogEntry]) -> SecurityMetrics {
581 let mut metrics = SecurityMetrics {
582 threats_detected: 0,
583 chunks_quarantined: 0,
584 integrity_violations: 0,
585 policy_violations: 0,
586 key_rotations: 0,
587 };
588
589 for entry in entries {
590 if let AuditEventType::Security(security_event) = &entry.event_type {
591 match security_event {
592 SecurityEvent::ThreatDetected { .. } => {
593 metrics.threats_detected += 1;
594 }
595 SecurityEvent::ChunkQuarantined { .. } => {
596 metrics.chunks_quarantined += 1;
597 }
598 SecurityEvent::IntegrityViolation { .. } => {
599 metrics.integrity_violations += 1;
600 }
601 SecurityEvent::SecurityPolicyViolation { .. } => {
602 metrics.policy_violations += 1;
603 }
604 SecurityEvent::EncryptionKeyRotation { .. } => {
605 metrics.key_rotations += 1;
606 }
607 }
608 }
609 }
610
611 metrics
612 }
613
614 /// Aggregate performance metrics from log entries
615 fn aggregate_performance_metrics(&self, entries: &[AuditLogEntry]) -> PerformanceMetrics {
616 let mut operation_times = Vec::new();
617 let mut throughputs = Vec::new();
618 let mut cpu_usages = Vec::new();
619 let mut memory_usages = Vec::new();
620 let mut latencies = Vec::new();
621
622 for entry in entries {
623 if let AuditEventType::Performance(perf_event) = &entry.event_type {
624 match perf_event {
625 PerformanceEvent::OperationTiming { duration_ms, .. } => {
626 operation_times.push(*duration_ms as f64);
627 }
628 PerformanceEvent::ThroughputMeasurement { bytes_per_second, .. } => {
629 throughputs.push(*bytes_per_second);
630 }
631 PerformanceEvent::ResourceUtilization { cpu_percent, memory_bytes, .. } => {
632 cpu_usages.push(*cpu_percent);
633 memory_usages.push(*memory_bytes);
634 }
635 PerformanceEvent::NetworkLatency { latency_ms, .. } => {
636 latencies.push(*latency_ms);
637 }
638 }
639 }
640 }
641
642 let average_operation_time_ms = if operation_times.is_empty() {
643 0.0
644 } else {
645 operation_times.iter().sum::<f64>() / operation_times.len() as f64
646 };
647
648 let peak_throughput_bps = throughputs.iter().max().copied().unwrap_or(0);
649
650 let average_cpu_usage = if cpu_usages.is_empty() {
651 0.0
652 } else {
653 cpu_usages.iter().sum::<f32>() / cpu_usages.len() as f32
654 };
655
656 let peak_memory_usage = memory_usages.iter().max().copied().unwrap_or(0);
657
658 // Calculate 95th percentile latency
659 let mut sorted_latencies = latencies;
660 sorted_latencies.sort_unstable();
661 let p95_index = (sorted_latencies.len() as f64 * 0.95) as usize;
662 let network_latency_p95_ms = sorted_latencies.get(p95_index).copied().unwrap_or(0);
663
664 PerformanceMetrics {
665 average_operation_time_ms,
666 peak_throughput_bps,
667 average_cpu_usage,
668 peak_memory_usage,
669 network_latency_p95_ms,
670 }
671 }
672
673 /// Aggregate node metrics from log entries
674 fn aggregate_node_metrics(&self, entries: &[AuditLogEntry]) -> NodeMetrics {
675 let mut metrics = NodeMetrics {
676 active_nodes: 0, // This would need to be calculated differently
677 nodes_joined: 0,
678 nodes_left: 0,
679 network_partitions: 0,
680 storage_threshold_breaches: 0,
681 };
682
683 for entry in entries {
684 if let AuditEventType::SystemHealth(health_event) = &entry.event_type {
685 match health_event {
686 SystemHealthEvent::NodeJoined { .. } => {
687 metrics.nodes_joined += 1;
688 }
689 SystemHealthEvent::NodeLeft { .. } => {
690 metrics.nodes_left += 1;
691 }
692 SystemHealthEvent::NetworkPartition { .. } => {
693 metrics.network_partitions += 1;
694 }
695 SystemHealthEvent::StorageThresholdReached { .. } => {
696 metrics.storage_threshold_breaches += 1;
697 }
698 _ => {}
699 }
700 }
701 }
702
703 metrics
704 }
705 }
706
/// Trait for audit log storage backends
///
/// Implementations persist entries and answer filtered queries; the auditor
/// never touches storage directly.
#[async_trait::async_trait]
pub trait AuditStorage: Send + Sync {
    /// Store a new audit log entry
    async fn store_entry(&self, entry: AuditLogEntry) -> Result<()>;

    /// Query audit log entries with filters
    ///
    /// How many of `AuditQuery`'s filters are honored is backend-defined
    /// (the in-memory test backend applies only time range and limit).
    async fn query_entries(&self, query: AuditQuery) -> Result<Vec<AuditLogEntry>>;

    /// Rotate old log files, dropping entries older than `retention_days`
    async fn rotate_logs(&self, retention_days: u32) -> Result<()>;

    /// Get storage statistics
    async fn get_storage_stats(&self) -> Result<(u64, u64)>; // (total_entries, total_size_bytes)
}
722
723 #[cfg(test)]
724 mod tests {
725 use super::*;
726 use std::sync::{Arc, Mutex};
727
    /// In-memory audit storage for testing
    ///
    /// Entries live behind an `Arc<Mutex<_>>` so the store can be shared
    /// across async tasks during tests.
    struct MemoryAuditStorage {
        entries: Arc<Mutex<Vec<AuditLogEntry>>>,
    }
732
    impl MemoryAuditStorage {
        /// Create an empty in-memory store.
        fn new() -> Self {
            Self {
                entries: Arc::new(Mutex::new(Vec::new())),
            }
        }
    }
740
    #[async_trait::async_trait]
    impl AuditStorage for MemoryAuditStorage {
        async fn store_entry(&self, entry: AuditLogEntry) -> Result<()> {
            // Panics on a poisoned mutex — acceptable in test code.
            let mut entries = self.entries.lock().unwrap();
            entries.push(entry);
            Ok(())
        }

        async fn query_entries(&self, query: AuditQuery) -> Result<Vec<AuditLogEntry>> {
            let entries = self.entries.lock().unwrap();
            let mut filtered: Vec<_> = entries.clone();

            // Apply time range filter (inclusive on both ends)
            if let Some((start, end)) = query.time_range {
                filtered.retain(|entry| entry.timestamp >= start && entry.timestamp <= end);
            }

            // Apply limit
            if let Some(limit) = query.limit {
                filtered.truncate(limit);
            }

            // NOTE(review): event_types, severity and source_node filters
            // are not implemented by this test backend.
            Ok(filtered)
        }

        async fn rotate_logs(&self, _retention_days: u32) -> Result<()> {
            // No-op for in-memory storage
            Ok(())
        }

        async fn get_storage_stats(&self) -> Result<(u64, u64)> {
            let entries = self.entries.lock().unwrap();
            Ok((entries.len() as u64, 0)) // Size calculation would be more complex
        }
    }
776
    /// End-to-end check: events are stored, queryable, and their integrity
    /// hashes verify against the same node id that produced them.
    #[tokio::test]
    async fn test_audit_logging() -> Result<()> {
        let config = AuditConfig::default();
        let storage = Box::new(MemoryAuditStorage::new());
        let auditor = TransparentAuditor::new(config, "test-node".to_string(), storage);

        // Log some events
        auditor.log_storage_event(StorageEvent::ChunkStored {
            chunk_id: Uuid::new_v4(),
            size: 1024,
            node_id: "node-1".to_string(),
        }).await?;

        auditor.log_security_event(SecurityEvent::ThreatDetected {
            threat_type: "malware".to_string(),
            severity: "medium".to_string(),
            details: "Test threat".to_string(),
        }).await?;

        // Query logs with integrity verification enabled
        let query = AuditQuery {
            time_range: None,
            event_types: None,
            severity: None,
            source_node: None,
            limit: None,
            verify_integrity: true,
        };

        let result = auditor.query_logs(query).await?;
        // One storage event + one security event
        assert_eq!(result.entries.len(), 2);
        assert!(matches!(result.integrity_status, IntegrityStatus::Verified));

        Ok(())
    }
812
    /// Aggregation check: five ChunkStored events of sizes 1000..=5000 must
    /// roll up to 5 chunks / 15000 bytes in the report for the last hour.
    #[tokio::test]
    async fn test_transparency_report() -> Result<()> {
        let config = AuditConfig::default();
        let storage = Box::new(MemoryAuditStorage::new());
        let auditor = TransparentAuditor::new(config, "test-node".to_string(), storage);

        // Log various events (sizes 1000, 2000, ..., 5000)
        for i in 0..5 {
            auditor.log_storage_event(StorageEvent::ChunkStored {
                chunk_id: Uuid::new_v4(),
                size: (i + 1) * 1000,
                node_id: format!("node-{}", i),
            }).await?;
        }

        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();

        let report = auditor.generate_transparency_report(
            current_time - 3600, // 1 hour ago
            current_time,
        ).await?;

        assert_eq!(report.storage_operations.total_chunks_stored, 5);
        assert_eq!(report.storage_operations.total_bytes_stored, 15000);

        Ok(())
    }
843 }