//! Democratic space allocation algorithm for ZephyrFS
//!
//! Implements fair, transparent, and democratic allocation of storage space
//! across the network based on capacity, demand, and volunteer preferences.
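//!
//! A minimal usage sketch (node and request construction is elided here; the
//! tests at the bottom of this file show concrete values):
//!
//! ```ignore
//! let mut allocator = DemocraticAllocator::new(AllocationConfig::default());
//! allocator.register_volunteer(node)?;
//! let request_id = allocator.request_allocation(request)?;
//! let results = allocator.process_allocations()?;
//! ```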

use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use std::time::{SystemTime, UNIX_EPOCH};
use uuid::Uuid;

/// Configuration for democratic space allocation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationConfig {
    /// Minimum storage allocation per volunteer (GB)
    pub min_allocation_gb: f64,
    /// Maximum storage allocation per volunteer (GB)
    pub max_allocation_gb: f64,
    /// Target network utilization percentage (0.0-1.0)
    pub target_utilization: f64,
    /// Weight for capacity-based allocation (0.0-1.0)
    pub capacity_weight: f64,
    /// Weight for demand-based allocation (0.0-1.0)
    pub demand_weight: f64,
    /// Weight for performance-based allocation (0.0-1.0); the three weights
    /// are expected to sum to 1.0 (see `allocate_hybrid`)
    pub performance_weight: f64,
    /// Rebalancing frequency (seconds)
    pub rebalancing_interval: u64,
    /// Enable fair queuing for requests
    pub enable_fair_queuing: bool,
}

impl Default for AllocationConfig {
    fn default() -> Self {
        Self {
            min_allocation_gb: 1.0,
            max_allocation_gb: 100.0,
            target_utilization: 0.75,
            capacity_weight: 0.4,
            demand_weight: 0.3,
            performance_weight: 0.3,
            rebalancing_interval: 3600, // 1 hour
            enable_fair_queuing: true,
        }
    }
}
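
// A custom configuration can override just a few fields via struct-update
// syntax (the values below are illustrative, not recommendations):
//
//     let config = AllocationConfig {
//         max_allocation_gb: 50.0,
//         target_utilization: 0.6,
//         ..AllocationConfig::default()
//     };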

/// Information about a storage volunteer node
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VolunteerNode {
    /// Unique node identifier
    pub node_id: Uuid,
    /// Total available capacity (GB)
    pub total_capacity_gb: f64,
    /// Currently allocated space (GB)
    pub allocated_space_gb: f64,
    /// Currently used space (GB)
    pub used_space_gb: f64,
    /// Node performance metrics
    pub performance: NodePerformance,
    /// Node preferences and constraints
    pub preferences: NodePreferences,
    /// Geographic location (for distribution)
    pub location: Option<GeographicLocation>,
    /// Node reliability score (0.0-1.0)
    pub reliability_score: f64,
    /// Last seen timestamp
    pub last_seen: u64,
    /// Node status
    pub status: NodeStatus,
}

/// Node performance metrics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodePerformance {
    /// Average response time (milliseconds)
    pub avg_response_time_ms: f64,
    /// Bandwidth capacity (Mbps)
    pub bandwidth_mbps: f64,
    /// Uptime percentage (0.0-1.0)
    pub uptime_percentage: f64,
    /// Error rate (0.0-1.0)
    pub error_rate: f64,
    /// CPU usage percentage (0.0-1.0)
    pub cpu_usage: f64,
    /// Memory usage percentage (0.0-1.0)
    pub memory_usage: f64,
}

/// Node preferences and constraints
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodePreferences {
    /// Maximum space willing to provide (GB)
    pub max_contribution_gb: f64,
    /// Preferred operating hours (24-hour format)
    pub preferred_hours: Option<(u8, u8)>,
    /// Bandwidth throttling preferences
    pub bandwidth_limit_mbps: Option<f64>,
    /// Content type preferences
    pub content_preferences: Vec<String>,
    /// Minimum reward rate required
    pub min_reward_rate: Option<f64>,
}

/// Geographic location for distribution
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeographicLocation {
    /// Country code
    pub country: String,
    /// Region/state
    pub region: String,
    /// City
    pub city: String,
    /// Latitude
    pub latitude: f64,
    /// Longitude
    pub longitude: f64,
}

/// Node operational status
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum NodeStatus {
    Active,
    Inactive,
    Maintenance,
    Overloaded,
    Error(String),
}

/// Storage allocation request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationRequest {
    /// Request identifier
    pub request_id: Uuid,
    /// Requesting user ID
    pub user_id: String,
    /// Requested storage amount (GB)
    pub requested_gb: f64,
    /// Priority level (0-10, higher is more urgent)
    pub priority: u8,
    /// Content type hint
    pub content_type: Option<String>,
    /// Geographic preference
    pub geo_preference: Option<String>,
    /// Performance requirements
    pub performance_requirements: PerformanceRequirements,
    /// Request timestamp
    pub created_at: u64,
}

/// Performance requirements for allocation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceRequirements {
    /// Maximum acceptable latency (milliseconds)
    pub max_latency_ms: Option<f64>,
    /// Minimum bandwidth requirement (Mbps)
    pub min_bandwidth_mbps: Option<f64>,
    /// Minimum uptime requirement (0.0-1.0)
    pub min_uptime: Option<f64>,
    /// Redundancy factor (number of copies)
    pub redundancy_factor: u8,
}

/// Result of space allocation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationResult {
    /// Request this allocation fulfills
    pub request_id: Uuid,
    /// Allocated nodes and their contributions
    pub allocations: Vec<NodeAllocation>,
    /// Total allocated space (GB)
    pub total_allocated_gb: f64,
    /// Allocation quality score (0.0-1.0)
    pub quality_score: f64,
    /// Allocation strategy used
    pub strategy: AllocationStrategy,
    /// Allocation timestamp
    pub allocated_at: u64,
}

/// Individual node allocation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeAllocation {
    /// Node receiving the allocation
    pub node_id: Uuid,
    /// Amount allocated to this node (GB)
    pub allocated_gb: f64,
    /// Expected reward for this allocation
    pub reward_amount: f64,
    /// Allocation priority on this node
    pub priority: u8,
    /// Performance score for this allocation
    pub performance_score: f64,
}

/// Allocation strategy used
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum AllocationStrategy {
    /// Capacity-based allocation
    CapacityBased,
    /// Performance-based allocation
    PerformanceBased,
    /// Geographic distribution
    GeographicDistribution,
    /// Load balancing
    LoadBalancing,
    /// Hybrid approach
    Hybrid,
}

/// Network allocation statistics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AllocationStats {
    /// Total network capacity (GB)
    pub total_capacity_gb: f64,
    /// Total allocated space (GB)
    pub total_allocated_gb: f64,
    /// Total used space (GB)
    pub total_used_gb: f64,
    /// Network utilization (0.0-1.0)
    pub utilization: f64,
    /// Number of active nodes
    pub active_nodes: usize,
    /// Number of pending requests
    pub pending_requests: usize,
    /// Average allocation quality
    pub avg_quality_score: f64,
}

/// Democratic space allocation engine
pub struct DemocraticAllocator {
    config: AllocationConfig,
    volunteer_nodes: HashMap<Uuid, VolunteerNode>,
    // Keyed by (timestamp, request id) so that requests submitted within the
    // same second do not overwrite each other; iteration order stays FIFO
    pending_requests: BTreeMap<(u64, Uuid), AllocationRequest>,
    active_allocations: HashMap<Uuid, AllocationResult>,
    allocation_history: Vec<AllocationResult>,
}

impl DemocraticAllocator {
    /// Create new democratic allocator
    pub fn new(config: AllocationConfig) -> Self {
        Self {
            config,
            volunteer_nodes: HashMap::new(),
            pending_requests: BTreeMap::new(),
            active_allocations: HashMap::new(),
            allocation_history: Vec::new(),
        }
    }

    /// Register a new volunteer node
    pub fn register_volunteer(&mut self, node: VolunteerNode) -> Result<()> {
        self.volunteer_nodes.insert(node.node_id, node);
        Ok(())
    }

    /// Update volunteer node information
    pub fn update_volunteer(&mut self, node_id: Uuid, updates: VolunteerNodeUpdate) -> Result<()> {
        if let Some(node) = self.volunteer_nodes.get_mut(&node_id) {
            // Associated-function call avoids borrowing `self` again while
            // `node` holds a mutable borrow of `self.volunteer_nodes`
            Self::apply_node_updates(node, updates);
            Ok(())
        } else {
            Err(anyhow::anyhow!("Volunteer node not found: {}", node_id))
        }
    }

    /// Submit a storage allocation request
    pub fn request_allocation(&mut self, mut request: AllocationRequest) -> Result<Uuid> {
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .context("Failed to get timestamp")?
            .as_secs();

        request.created_at = current_time;
        let request_id = request.request_id;

        self.pending_requests.insert((current_time, request_id), request);
        Ok(request_id)
    }

    /// Process pending allocation requests
    pub fn process_allocations(&mut self) -> Result<Vec<AllocationResult>> {
        let mut results = Vec::new();

        // Process requests in timestamp order (FIFO); `enable_fair_queuing`
        // and per-request priority are not yet factored into the ordering
        let requests_to_process: Vec<_> = self.pending_requests.values().cloned().collect();

        for request in requests_to_process {
            if let Ok(result) = self.allocate_storage(&request) {
                // Remove from pending
                self.pending_requests.retain(|_, r| r.request_id != request.request_id);

                // Reserve the granted space on each contributing node so that
                // subsequent requests see the reduced free capacity
                for allocation in &result.allocations {
                    if let Some(node) = self.volunteer_nodes.get_mut(&allocation.node_id) {
                        node.allocated_space_gb += allocation.allocated_gb;
                    }
                }

                // Store result
                self.active_allocations.insert(result.request_id, result.clone());
                self.allocation_history.push(result.clone());
                results.push(result);
            }
        }

        Ok(results)
    }

    /// Allocate storage for a specific request
    pub fn allocate_storage(&self, request: &AllocationRequest) -> Result<AllocationResult> {
        // Filter eligible nodes
        let eligible_nodes = self.filter_eligible_nodes(request)?;

        if eligible_nodes.is_empty() {
            return Err(anyhow::anyhow!("No eligible nodes available for allocation"));
        }

        // Calculate optimal allocation strategy
        let strategy = self.determine_allocation_strategy(request, &eligible_nodes);

        // Perform allocation based on strategy
        let allocations = match strategy {
            AllocationStrategy::CapacityBased => self.allocate_by_capacity(request, &eligible_nodes)?,
            AllocationStrategy::PerformanceBased => self.allocate_by_performance(request, &eligible_nodes)?,
            AllocationStrategy::GeographicDistribution => self.allocate_by_geography(request, &eligible_nodes)?,
            AllocationStrategy::LoadBalancing => self.allocate_by_load_balancing(request, &eligible_nodes)?,
            AllocationStrategy::Hybrid => self.allocate_hybrid(request, &eligible_nodes)?,
        };

        // Every strategy skips chunks below `min_allocation_gb`, so the plan
        // can come back empty; treat that as a failed allocation rather than
        // silently fulfilling the request with zero space
        if allocations.is_empty() {
            return Err(anyhow::anyhow!("No node could provide the minimum allocation size"));
        }

        let total_allocated_gb = allocations.iter().map(|a| a.allocated_gb).sum();
        let quality_score = self.calculate_allocation_quality(&allocations, request);

        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .context("Failed to get timestamp")?
            .as_secs();

        Ok(AllocationResult {
            request_id: request.request_id,
            allocations,
            total_allocated_gb,
            quality_score,
            strategy,
            allocated_at: current_time,
        })
    }

    /// Get network allocation statistics
    pub fn get_allocation_stats(&self) -> AllocationStats {
        let total_capacity_gb = self.volunteer_nodes.values()
            .map(|n| n.total_capacity_gb)
            .sum();

        let total_allocated_gb = self.volunteer_nodes.values()
            .map(|n| n.allocated_space_gb)
            .sum();

        let total_used_gb = self.volunteer_nodes.values()
            .map(|n| n.used_space_gb)
            .sum();

        let utilization = if total_capacity_gb > 0.0 {
            total_used_gb / total_capacity_gb
        } else {
            0.0
        };

        let active_nodes = self.volunteer_nodes.values()
            .filter(|n| matches!(n.status, NodeStatus::Active))
            .count();

        let avg_quality_score = if !self.allocation_history.is_empty() {
            self.allocation_history.iter()
                .map(|a| a.quality_score)
                .sum::<f64>() / self.allocation_history.len() as f64
        } else {
            0.0
        };

        AllocationStats {
            total_capacity_gb,
            total_allocated_gb,
            total_used_gb,
            utilization,
            active_nodes,
            pending_requests: self.pending_requests.len(),
            avg_quality_score,
        }
    }

    /// Rebalance allocations across the network
    pub fn rebalance_network(&mut self) -> Result<Vec<RebalancingAction>> {
        let stats = self.get_allocation_stats();
        let mut actions = Vec::new();

        // Check if rebalancing is needed
        if stats.utilization > self.config.target_utilization + 0.1 ||
            stats.utilization < self.config.target_utilization - 0.2 {
            // Calculate optimal rebalancing moves
            let rebalancing_plan = self.calculate_rebalancing_plan(&stats)?;
            actions.extend(rebalancing_plan);
        }

        Ok(actions)
    }

    /// Filter nodes eligible for a request
    fn filter_eligible_nodes(&self, request: &AllocationRequest) -> Result<Vec<&VolunteerNode>> {
        Ok(self.volunteer_nodes.values()
            .filter(|node| {
                // Basic eligibility checks
                matches!(node.status, NodeStatus::Active) &&
                    node.total_capacity_gb - node.allocated_space_gb >= 0.1 && // At least 100 MB free
                    node.performance.uptime_percentage >= 0.9 && // At least 90% uptime
                    self.meets_performance_requirements(node, &request.performance_requirements)
            })
            .collect())
    }

    /// Check if node meets performance requirements
    fn meets_performance_requirements(&self, node: &VolunteerNode, reqs: &PerformanceRequirements) -> bool {
        if let Some(max_latency) = reqs.max_latency_ms {
            if node.performance.avg_response_time_ms > max_latency {
                return false;
            }
        }

        if let Some(min_bandwidth) = reqs.min_bandwidth_mbps {
            if node.performance.bandwidth_mbps < min_bandwidth {
                return false;
            }
        }

        if let Some(min_uptime) = reqs.min_uptime {
            if node.performance.uptime_percentage < min_uptime {
                return false;
            }
        }

        true
    }

    /// Determine optimal allocation strategy
    fn determine_allocation_strategy(
        &self,
        request: &AllocationRequest,
        eligible_nodes: &[&VolunteerNode],
    ) -> AllocationStrategy {
        // Simple heuristic-based strategy selection
        if request.performance_requirements.min_bandwidth_mbps.is_some() ||
            request.performance_requirements.max_latency_ms.is_some() {
            AllocationStrategy::PerformanceBased
        } else if request.geo_preference.is_some() {
            AllocationStrategy::GeographicDistribution
        } else if eligible_nodes.len() > 10 {
            AllocationStrategy::LoadBalancing
        } else {
            AllocationStrategy::Hybrid
        }
    }

    /// Allocate storage based on node capacity
    fn allocate_by_capacity(
        &self,
        request: &AllocationRequest,
        eligible_nodes: &[&VolunteerNode],
    ) -> Result<Vec<NodeAllocation>> {
        let mut allocations = Vec::new();
        let mut remaining_gb = request.requested_gb;

        // Sort nodes by available capacity (descending); `total_cmp` cannot
        // panic on NaN, unlike `partial_cmp(..).unwrap()`
        let mut sorted_nodes: Vec<_> = eligible_nodes.iter().collect();
        sorted_nodes.sort_by(|a, b| {
            let a_available = a.total_capacity_gb - a.allocated_space_gb;
            let b_available = b.total_capacity_gb - b.allocated_space_gb;
            b_available.total_cmp(&a_available)
        });

        for node in sorted_nodes {
            if remaining_gb <= 0.0 {
                break;
            }

            let available_gb = node.total_capacity_gb - node.allocated_space_gb;
            let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb);

            if to_allocate >= self.config.min_allocation_gb {
                allocations.push(NodeAllocation {
                    node_id: node.node_id,
                    allocated_gb: to_allocate,
                    reward_amount: to_allocate * 0.01, // $0.01 per GB
                    priority: request.priority,
                    performance_score: self.calculate_node_performance_score(node),
                });

                remaining_gb -= to_allocate;
            }
        }

        Ok(allocations)
    }

    /// Allocate storage based on node performance
    fn allocate_by_performance(
        &self,
        request: &AllocationRequest,
        eligible_nodes: &[&VolunteerNode],
    ) -> Result<Vec<NodeAllocation>> {
        let mut allocations = Vec::new();
        let mut remaining_gb = request.requested_gb;

        // Sort nodes by performance score (descending)
        let mut sorted_nodes: Vec<_> = eligible_nodes.iter()
            .map(|node| (node, self.calculate_node_performance_score(node)))
            .collect();
        sorted_nodes.sort_by(|a, b| b.1.total_cmp(&a.1));

        for (node, performance_score) in sorted_nodes {
            if remaining_gb <= 0.0 {
                break;
            }

            let available_gb = node.total_capacity_gb - node.allocated_space_gb;
            let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb);

            if to_allocate >= self.config.min_allocation_gb {
                allocations.push(NodeAllocation {
                    node_id: node.node_id,
                    allocated_gb: to_allocate,
                    reward_amount: to_allocate * 0.015 * performance_score, // Performance bonus
                    priority: request.priority,
                    performance_score,
                });

                remaining_gb -= to_allocate;
            }
        }

        Ok(allocations)
    }

    /// Allocate storage based on geographic distribution
    fn allocate_by_geography(
        &self,
        request: &AllocationRequest,
        eligible_nodes: &[&VolunteerNode],
    ) -> Result<Vec<NodeAllocation>> {
        // For now, fall back to capacity-based allocation.
        // In a real implementation, this would consider geographic distribution.
        self.allocate_by_capacity(request, eligible_nodes)
    }

    /// Allocate storage using load balancing
    fn allocate_by_load_balancing(
        &self,
        request: &AllocationRequest,
        eligible_nodes: &[&VolunteerNode],
    ) -> Result<Vec<NodeAllocation>> {
        let mut allocations = Vec::new();
        let mut remaining_gb = request.requested_gb;

        // Sort nodes by current utilization (ascending); eligible nodes always
        // have nonzero capacity, so the division is safe
        let mut sorted_nodes: Vec<_> = eligible_nodes.iter().collect();
        sorted_nodes.sort_by(|a, b| {
            let a_utilization = a.used_space_gb / a.total_capacity_gb;
            let b_utilization = b.used_space_gb / b.total_capacity_gb;
            a_utilization.total_cmp(&b_utilization)
        });

        for node in sorted_nodes {
            if remaining_gb <= 0.0 {
                break;
            }

            let available_gb = node.total_capacity_gb - node.allocated_space_gb;
            let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb);

            if to_allocate >= self.config.min_allocation_gb {
                allocations.push(NodeAllocation {
                    node_id: node.node_id,
                    allocated_gb: to_allocate,
                    reward_amount: to_allocate * 0.01,
                    priority: request.priority,
                    performance_score: self.calculate_node_performance_score(node),
                });

                remaining_gb -= to_allocate;
            }
        }

        Ok(allocations)
    }

    /// Allocate storage using hybrid approach
    fn allocate_hybrid(
        &self,
        request: &AllocationRequest,
        eligible_nodes: &[&VolunteerNode],
    ) -> Result<Vec<NodeAllocation>> {
        // Hybrid scoring based on capacity, performance, and load
        let mut node_scores: Vec<_> = eligible_nodes.iter()
            .map(|node| {
                let available_gb = node.total_capacity_gb - node.allocated_space_gb;
                let utilization = node.used_space_gb / node.total_capacity_gb;
                let performance_score = self.calculate_node_performance_score(node);

                let capacity_score = available_gb / self.config.max_allocation_gb;
                let load_score = 1.0 - utilization;

                // The remaining weight (equal to `demand_weight` when the
                // three weights sum to 1.0) is applied to the load score
                let hybrid_score =
                    self.config.capacity_weight * capacity_score +
                    self.config.performance_weight * performance_score +
                    (1.0 - self.config.capacity_weight - self.config.performance_weight) * load_score;

                (node, hybrid_score)
            })
            .collect();

        // Sort by hybrid score (descending)
        node_scores.sort_by(|a, b| b.1.total_cmp(&a.1));

        let mut allocations = Vec::new();
        let mut remaining_gb = request.requested_gb;

        for (node, score) in node_scores {
            if remaining_gb <= 0.0 {
                break;
            }

            let available_gb = node.total_capacity_gb - node.allocated_space_gb;
            let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb);

            if to_allocate >= self.config.min_allocation_gb {
                allocations.push(NodeAllocation {
                    node_id: node.node_id,
                    allocated_gb: to_allocate,
                    reward_amount: to_allocate * 0.012 * score, // Score-based reward
                    priority: request.priority,
                    performance_score: self.calculate_node_performance_score(node),
                });

                remaining_gb -= to_allocate;
            }
        }

        Ok(allocations)
    }

    /// Calculate node performance score as the equal-weight average of five
    /// sub-scores: latency (linear falloff to 1000 ms), bandwidth (saturating
    /// at 100 Mbps), uptime, reliability, and the inverse error rate
    fn calculate_node_performance_score(&self, node: &VolunteerNode) -> f64 {
        let latency_score = (1000.0 - node.performance.avg_response_time_ms).max(0.0) / 1000.0;
        let bandwidth_score = (node.performance.bandwidth_mbps / 100.0).min(1.0);
        let uptime_score = node.performance.uptime_percentage;
        let reliability_score = node.reliability_score;
        let error_score = 1.0 - node.performance.error_rate;

        (latency_score + bandwidth_score + uptime_score + reliability_score + error_score) / 5.0
    }
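
    // Worked example: a node with 50 ms latency, 100 Mbps bandwidth, 99%
    // uptime, 0.95 reliability, and a 1% error rate scores
    // (0.95 + 1.0 + 0.99 + 0.95 + 0.99) / 5 = 0.976.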

    /// Calculate quality score for an allocation
    fn calculate_allocation_quality(&self, allocations: &[NodeAllocation], request: &AllocationRequest) -> f64 {
        if allocations.is_empty() {
            return 0.0;
        }

        let total_allocated = allocations.iter().map(|a| a.allocated_gb).sum::<f64>();
        let fulfillment_ratio = (total_allocated / request.requested_gb).min(1.0);

        let avg_performance = allocations.iter()
            .map(|a| a.performance_score)
            .sum::<f64>() / allocations.len() as f64;

        let diversity_bonus = if allocations.len() > 1 { 0.1 } else { 0.0 };

        (fulfillment_ratio * 0.6 + avg_performance * 0.3 + diversity_bonus).min(1.0)
    }
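
    // Worked example: a fully satisfied request spread across two nodes with
    // an average performance score of 0.9 yields
    // 1.0 * 0.6 + 0.9 * 0.3 + 0.1 = 0.97.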

    /// Apply updates to a volunteer node
    fn apply_node_updates(node: &mut VolunteerNode, updates: VolunteerNodeUpdate) {
        if let Some(capacity) = updates.total_capacity_gb {
            node.total_capacity_gb = capacity;
        }
        if let Some(used) = updates.used_space_gb {
            node.used_space_gb = used;
        }
        if let Some(performance) = updates.performance {
            node.performance = performance;
        }
        if let Some(status) = updates.status {
            node.status = status;
        }
    }

    /// Calculate rebalancing plan
    fn calculate_rebalancing_plan(&self, _stats: &AllocationStats) -> Result<Vec<RebalancingAction>> {
        // Simplified rebalancing - in production this would be more sophisticated
        Ok(vec![])
    }
}

/// Updates for volunteer node information
#[derive(Debug, Clone)]
pub struct VolunteerNodeUpdate {
    pub total_capacity_gb: Option<f64>,
    pub used_space_gb: Option<f64>,
    pub performance: Option<NodePerformance>,
    pub status: Option<NodeStatus>,
}

/// Rebalancing action
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RebalancingAction {
    MoveAllocation {
        from_node: Uuid,
        to_node: Uuid,
        amount_gb: f64,
    },
    ScaleUp {
        node_id: Uuid,
        additional_gb: f64,
    },
    ScaleDown {
        node_id: Uuid,
        reduction_gb: f64,
    },
}
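
// With serde's default externally tagged enum representation, a move action
// serializes as JSON like (UUIDs abbreviated):
//
//     {"MoveAllocation":{"from_node":"...","to_node":"...","amount_gb":5.0}}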

#[cfg(test)]
mod tests {
    use super::*;

    fn create_test_node(node_id: Uuid, capacity_gb: f64) -> VolunteerNode {
        VolunteerNode {
            node_id,
            total_capacity_gb: capacity_gb,
            allocated_space_gb: 0.0,
            used_space_gb: 0.0,
            performance: NodePerformance {
                avg_response_time_ms: 50.0,
                bandwidth_mbps: 100.0,
                uptime_percentage: 0.99,
                error_rate: 0.01,
                cpu_usage: 0.3,
                memory_usage: 0.4,
            },
            preferences: NodePreferences {
                max_contribution_gb: capacity_gb,
                preferred_hours: None,
                bandwidth_limit_mbps: None,
                content_preferences: vec![],
                min_reward_rate: None,
            },
            location: None,
            reliability_score: 0.95,
            last_seen: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            status: NodeStatus::Active,
        }
    }

    #[test]
    fn test_democratic_allocation_basic() -> Result<()> {
        let config = AllocationConfig::default();
        let mut allocator = DemocraticAllocator::new(config);

        // Register some volunteer nodes
        let node1 = create_test_node(Uuid::new_v4(), 10.0);
        let node2 = create_test_node(Uuid::new_v4(), 20.0);
        let node3 = create_test_node(Uuid::new_v4(), 15.0);

        allocator.register_volunteer(node1)?;
        allocator.register_volunteer(node2)?;
        allocator.register_volunteer(node3)?;

        // Create allocation request
        let request = AllocationRequest {
            request_id: Uuid::new_v4(),
            user_id: "test-user".to_string(),
            requested_gb: 5.0,
            priority: 5,
            content_type: None,
            geo_preference: None,
            performance_requirements: PerformanceRequirements {
                max_latency_ms: None,
                min_bandwidth_mbps: None,
                min_uptime: None,
                redundancy_factor: 1,
            },
            created_at: 0, // Will be set by request_allocation
        };

        let request_id = allocator.request_allocation(request)?;
        let results = allocator.process_allocations()?;

        assert!(!results.is_empty());
        assert_eq!(results[0].request_id, request_id);
        assert!(results[0].total_allocated_gb > 0.0);

        Ok(())
    }

    #[test]
    fn test_allocation_stats() {
        let config = AllocationConfig::default();
        let mut allocator = DemocraticAllocator::new(config);

        // Add some nodes
        allocator.register_volunteer(create_test_node(Uuid::new_v4(), 10.0)).unwrap();
        allocator.register_volunteer(create_test_node(Uuid::new_v4(), 20.0)).unwrap();

        let stats = allocator.get_allocation_stats();

        assert_eq!(stats.total_capacity_gb, 30.0);
        assert_eq!(stats.active_nodes, 2);
        assert_eq!(stats.utilization, 0.0); // No usage yet
    }

    #[test]
    fn test_node_performance_score() {
        let config = AllocationConfig::default();
        let allocator = DemocraticAllocator::new(config);
        let node = create_test_node(Uuid::new_v4(), 10.0);

        let score = allocator.calculate_node_performance_score(&node);
        assert!(score > 0.8); // Should be high score for good test node
        assert!(score <= 1.0);
    }
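
    // Strategy selection is covered only implicitly above; this test pins the
    // performance-based path (the request values are arbitrary test data)
    #[test]
    fn test_performance_requirements_select_strategy() -> Result<()> {
        let allocator = {
            let mut a = DemocraticAllocator::new(AllocationConfig::default());
            a.register_volunteer(create_test_node(Uuid::new_v4(), 10.0))?;
            a.register_volunteer(create_test_node(Uuid::new_v4(), 20.0))?;
            a
        };

        let request = AllocationRequest {
            request_id: Uuid::new_v4(),
            user_id: "test-user".to_string(),
            requested_gb: 2.0,
            priority: 5,
            content_type: None,
            geo_preference: None,
            performance_requirements: PerformanceRequirements {
                max_latency_ms: Some(100.0),
                min_bandwidth_mbps: Some(50.0),
                min_uptime: None,
                redundancy_factor: 1,
            },
            created_at: 0,
        };

        let result = allocator.allocate_storage(&request)?;
        assert_eq!(result.strategy, AllocationStrategy::PerformanceBased);
        assert!(result.total_allocated_gb >= 2.0);
        Ok(())
    }
}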
825 }