5.1 economic incentives foundation
Authored by
mfwolffe <wolffemf@dukes.jmu.edu>
- SHA
a3be0d117a2495a4b207f58cc8f437dd5ac41296- Parents
-
8c0fa0d - Tree
0e92aae
a3be0d1
a3be0d117a2495a4b207f58cc8f437dd5ac412968c0fa0d
0e92aae| Status | File | + | - |
|---|---|---|---|
| A |
src/allocation/democratic_allocation.rs
|
825 | 0 |
| A |
src/economics/earnings_calculator.rs
|
616 | 0 |
| A |
src/economics/market_maker.rs
|
629 | 0 |
| A |
src/economics/mod.rs
|
21 | 0 |
| A |
src/economics/network_health_minter.rs
|
516 | 0 |
| A |
src/economics/payment_processor.rs
|
822 | 0 |
| A |
src/economics/payout_scheduler.rs
|
886 | 0 |
| A |
src/economics/performance_rewards.rs
|
1034 | 0 |
| A |
src/economics/token_model.rs
|
398 | 0 |
| A |
src/economics/zephyr_coin.rs
|
561 | 0 |
| M |
src/lib.rs
|
13 | 0 |
src/allocation/democratic_allocation.rsadded@@ -0,0 +1,825 @@ | ||
| 1 | +//! Democratic space allocation algorithm for ZephyrFS | |
| 2 | +//! | |
| 3 | +//! Implements fair, transparent, and democratic allocation of storage space | |
| 4 | +//! across the network based on capacity, demand, and volunteer preferences. | |
| 5 | + | |
| 6 | +use anyhow::{Context, Result}; | |
| 7 | +use serde::{Deserialize, Serialize}; | |
| 8 | +use std::collections::{HashMap, BTreeMap}; | |
| 9 | +use std::time::{SystemTime, UNIX_EPOCH}; | |
| 10 | +use uuid::Uuid; | |
| 11 | + | |
| 12 | +/// Configuration for democratic space allocation | |
| 13 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 14 | +pub struct AllocationConfig { | |
| 15 | + /// Minimum storage allocation per volunteer (GB) | |
| 16 | + pub min_allocation_gb: f64, | |
| 17 | + /// Maximum storage allocation per volunteer (GB) | |
| 18 | + pub max_allocation_gb: f64, | |
| 19 | + /// Target network utilization percentage (0.0-1.0) | |
| 20 | + pub target_utilization: f64, | |
| 21 | + /// Weight for capacity-based allocation (0.0-1.0) | |
| 22 | + pub capacity_weight: f64, | |
| 23 | + /// Weight for demand-based allocation (0.0-1.0) | |
| 24 | + pub demand_weight: f64, | |
| 25 | + /// Weight for performance-based allocation (0.0-1.0) | |
| 26 | + pub performance_weight: f64, | |
| 27 | + /// Rebalancing frequency (seconds) | |
| 28 | + pub rebalancing_interval: u64, | |
| 29 | + /// Enable fair queuing for requests | |
| 30 | + pub enable_fair_queuing: bool, | |
| 31 | +} | |
| 32 | + | |
| 33 | +impl Default for AllocationConfig { | |
| 34 | + fn default() -> Self { | |
| 35 | + Self { | |
| 36 | + min_allocation_gb: 1.0, | |
| 37 | + max_allocation_gb: 100.0, | |
| 38 | + target_utilization: 0.75, | |
| 39 | + capacity_weight: 0.4, | |
| 40 | + demand_weight: 0.3, | |
| 41 | + performance_weight: 0.3, | |
| 42 | + rebalancing_interval: 3600, // 1 hour | |
| 43 | + enable_fair_queuing: true, | |
| 44 | + } | |
| 45 | + } | |
| 46 | +} | |
| 47 | + | |
| 48 | +/// Information about a storage volunteer node | |
| 49 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 50 | +pub struct VolunteerNode { | |
| 51 | + /// Unique node identifier | |
| 52 | + pub node_id: Uuid, | |
| 53 | + /// Total available capacity (GB) | |
| 54 | + pub total_capacity_gb: f64, | |
| 55 | + /// Currently allocated space (GB) | |
| 56 | + pub allocated_space_gb: f64, | |
| 57 | + /// Currently used space (GB) | |
| 58 | + pub used_space_gb: f64, | |
| 59 | + /// Node performance metrics | |
| 60 | + pub performance: NodePerformance, | |
| 61 | + /// Node preferences and constraints | |
| 62 | + pub preferences: NodePreferences, | |
| 63 | + /// Geographic location (for distribution) | |
| 64 | + pub location: Option<GeographicLocation>, | |
| 65 | + /// Node reliability score (0.0-1.0) | |
| 66 | + pub reliability_score: f64, | |
| 67 | + /// Last seen timestamp | |
| 68 | + pub last_seen: u64, | |
| 69 | + /// Node status | |
| 70 | + pub status: NodeStatus, | |
| 71 | +} | |
| 72 | + | |
| 73 | +/// Node performance metrics | |
| 74 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 75 | +pub struct NodePerformance { | |
| 76 | + /// Average response time (milliseconds) | |
| 77 | + pub avg_response_time_ms: f64, | |
| 78 | + /// Bandwidth capacity (Mbps) | |
| 79 | + pub bandwidth_mbps: f64, | |
| 80 | + /// Uptime percentage (0.0-1.0) | |
| 81 | + pub uptime_percentage: f64, | |
| 82 | + /// Error rate (0.0-1.0) | |
| 83 | + pub error_rate: f64, | |
| 84 | + /// CPU usage percentage (0.0-1.0) | |
| 85 | + pub cpu_usage: f64, | |
| 86 | + /// Memory usage percentage (0.0-1.0) | |
| 87 | + pub memory_usage: f64, | |
| 88 | +} | |
| 89 | + | |
| 90 | +/// Node preferences and constraints | |
| 91 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 92 | +pub struct NodePreferences { | |
| 93 | + /// Maximum space willing to provide (GB) | |
| 94 | + pub max_contribution_gb: f64, | |
| 95 | + /// Preferred operating hours (24-hour format) | |
| 96 | + pub preferred_hours: Option<(u8, u8)>, | |
| 97 | + /// Bandwidth throttling preferences | |
| 98 | + pub bandwidth_limit_mbps: Option<f64>, | |
| 99 | + /// Content type preferences | |
| 100 | + pub content_preferences: Vec<String>, | |
| 101 | + /// Minimum reward rate required | |
| 102 | + pub min_reward_rate: Option<f64>, | |
| 103 | +} | |
| 104 | + | |
| 105 | +/// Geographic location for distribution | |
| 106 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 107 | +pub struct GeographicLocation { | |
| 108 | + /// Country code | |
| 109 | + pub country: String, | |
| 110 | + /// Region/state | |
| 111 | + pub region: String, | |
| 112 | + /// City | |
| 113 | + pub city: String, | |
| 114 | + /// Latitude | |
| 115 | + pub latitude: f64, | |
| 116 | + /// Longitude | |
| 117 | + pub longitude: f64, | |
| 118 | +} | |
| 119 | + | |
| 120 | +/// Node operational status | |
| 121 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 122 | +pub enum NodeStatus { | |
| 123 | + Active, | |
| 124 | + Inactive, | |
| 125 | + Maintenance, | |
| 126 | + Overloaded, | |
| 127 | + Error(String), | |
| 128 | +} | |
| 129 | + | |
| 130 | +/// Storage allocation request | |
| 131 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 132 | +pub struct AllocationRequest { | |
| 133 | + /// Request identifier | |
| 134 | + pub request_id: Uuid, | |
| 135 | + /// Requesting user ID | |
| 136 | + pub user_id: String, | |
| 137 | + /// Requested storage amount (GB) | |
| 138 | + pub requested_gb: f64, | |
| 139 | + /// Priority level (0-10, higher is more urgent) | |
| 140 | + pub priority: u8, | |
| 141 | + /// Content type hint | |
| 142 | + pub content_type: Option<String>, | |
| 143 | + /// Geographic preference | |
| 144 | + pub geo_preference: Option<String>, | |
| 145 | + /// Performance requirements | |
| 146 | + pub performance_requirements: PerformanceRequirements, | |
| 147 | + /// Request timestamp | |
| 148 | + pub created_at: u64, | |
| 149 | +} | |
| 150 | + | |
| 151 | +/// Performance requirements for allocation | |
| 152 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 153 | +pub struct PerformanceRequirements { | |
| 154 | + /// Maximum acceptable latency (milliseconds) | |
| 155 | + pub max_latency_ms: Option<f64>, | |
| 156 | + /// Minimum bandwidth requirement (Mbps) | |
| 157 | + pub min_bandwidth_mbps: Option<f64>, | |
| 158 | + /// Minimum uptime requirement (0.0-1.0) | |
| 159 | + pub min_uptime: Option<f64>, | |
| 160 | + /// Redundancy factor (number of copies) | |
| 161 | + pub redundancy_factor: u8, | |
| 162 | +} | |
| 163 | + | |
| 164 | +/// Result of space allocation | |
| 165 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 166 | +pub struct AllocationResult { | |
| 167 | + /// Request this allocation fulfills | |
| 168 | + pub request_id: Uuid, | |
| 169 | + /// Allocated nodes and their contributions | |
| 170 | + pub allocations: Vec<NodeAllocation>, | |
| 171 | + /// Total allocated space (GB) | |
| 172 | + pub total_allocated_gb: f64, | |
| 173 | + /// Allocation quality score (0.0-1.0) | |
| 174 | + pub quality_score: f64, | |
| 175 | + /// Allocation strategy used | |
| 176 | + pub strategy: AllocationStrategy, | |
| 177 | + /// Allocation timestamp | |
| 178 | + pub allocated_at: u64, | |
| 179 | +} | |
| 180 | + | |
| 181 | +/// Individual node allocation | |
| 182 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 183 | +pub struct NodeAllocation { | |
| 184 | + /// Node receiving the allocation | |
| 185 | + pub node_id: Uuid, | |
| 186 | + /// Amount allocated to this node (GB) | |
| 187 | + pub allocated_gb: f64, | |
| 188 | + /// Expected reward for this allocation | |
| 189 | + pub reward_amount: f64, | |
| 190 | + /// Allocation priority on this node | |
| 191 | + pub priority: u8, | |
| 192 | + /// Performance score for this allocation | |
| 193 | + pub performance_score: f64, | |
| 194 | +} | |
| 195 | + | |
| 196 | +/// Allocation strategy used | |
| 197 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 198 | +pub enum AllocationStrategy { | |
| 199 | + /// Capacity-based allocation | |
| 200 | + CapacityBased, | |
| 201 | + /// Performance-based allocation | |
| 202 | + PerformanceBased, | |
| 203 | + /// Geographic distribution | |
| 204 | + GeographicDistribution, | |
| 205 | + /// Load balancing | |
| 206 | + LoadBalancing, | |
| 207 | + /// Hybrid approach | |
| 208 | + Hybrid, | |
| 209 | +} | |
| 210 | + | |
| 211 | +/// Network allocation statistics | |
| 212 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 213 | +pub struct AllocationStats { | |
| 214 | + /// Total network capacity (GB) | |
| 215 | + pub total_capacity_gb: f64, | |
| 216 | + /// Total allocated space (GB) | |
| 217 | + pub total_allocated_gb: f64, | |
| 218 | + /// Total used space (GB) | |
| 219 | + pub total_used_gb: f64, | |
| 220 | + /// Network utilization (0.0-1.0) | |
| 221 | + pub utilization: f64, | |
| 222 | + /// Number of active nodes | |
| 223 | + pub active_nodes: usize, | |
| 224 | + /// Number of pending requests | |
| 225 | + pub pending_requests: usize, | |
| 226 | + /// Average allocation quality | |
| 227 | + pub avg_quality_score: f64, | |
| 228 | +} | |
| 229 | + | |
| 230 | +/// Democratic space allocation engine | |
| 231 | +pub struct DemocraticAllocator { | |
| 232 | + config: AllocationConfig, | |
| 233 | + volunteer_nodes: HashMap<Uuid, VolunteerNode>, | |
| 234 | + pending_requests: BTreeMap<u64, AllocationRequest>, // Ordered by timestamp | |
| 235 | + active_allocations: HashMap<Uuid, AllocationResult>, | |
| 236 | + allocation_history: Vec<AllocationResult>, | |
| 237 | +} | |
| 238 | + | |
| 239 | +impl DemocraticAllocator { | |
| 240 | + /// Create new democratic allocator | |
| 241 | + pub fn new(config: AllocationConfig) -> Self { | |
| 242 | + Self { | |
| 243 | + config, | |
| 244 | + volunteer_nodes: HashMap::new(), | |
| 245 | + pending_requests: BTreeMap::new(), | |
| 246 | + active_allocations: HashMap::new(), | |
| 247 | + allocation_history: Vec::new(), | |
| 248 | + } | |
| 249 | + } | |
| 250 | + | |
| 251 | + /// Register a new volunteer node | |
| 252 | + pub fn register_volunteer(&mut self, node: VolunteerNode) -> Result<()> { | |
| 253 | + self.volunteer_nodes.insert(node.node_id, node); | |
| 254 | + Ok(()) | |
| 255 | + } | |
| 256 | + | |
| 257 | + /// Update volunteer node information | |
| 258 | + pub fn update_volunteer(&mut self, node_id: Uuid, updates: VolunteerNodeUpdate) -> Result<()> { | |
| 259 | + if let Some(node) = self.volunteer_nodes.get_mut(&node_id) { | |
| 260 | + self.apply_node_updates(node, updates); | |
| 261 | + Ok(()) | |
| 262 | + } else { | |
| 263 | + Err(anyhow::anyhow!("Volunteer node not found: {}", node_id)) | |
| 264 | + } | |
| 265 | + } | |
| 266 | + | |
| 267 | + /// Submit a storage allocation request | |
| 268 | + pub fn request_allocation(&mut self, mut request: AllocationRequest) -> Result<Uuid> { | |
| 269 | + let current_time = SystemTime::now() | |
| 270 | + .duration_since(UNIX_EPOCH) | |
| 271 | + .context("Failed to get timestamp")? | |
| 272 | + .as_secs(); | |
| 273 | + | |
| 274 | + request.created_at = current_time; | |
| 275 | + let request_id = request.request_id; | |
| 276 | + | |
| 277 | + self.pending_requests.insert(current_time, request); | |
| 278 | + Ok(request_id) | |
| 279 | + } | |
| 280 | + | |
| 281 | + /// Process pending allocation requests | |
| 282 | + pub fn process_allocations(&mut self) -> Result<Vec<AllocationResult>> { | |
| 283 | + let mut results = Vec::new(); | |
| 284 | + | |
| 285 | + // Process requests in order (FIFO with priority consideration) | |
| 286 | + let requests_to_process: Vec<_> = self.pending_requests.values().cloned().collect(); | |
| 287 | + | |
| 288 | + for request in requests_to_process { | |
| 289 | + if let Ok(result) = self.allocate_storage(&request) { | |
| 290 | + // Remove from pending | |
| 291 | + self.pending_requests.retain(|_, r| r.request_id != request.request_id); | |
| 292 | + | |
| 293 | + // Store result | |
| 294 | + self.active_allocations.insert(result.request_id, result.clone()); | |
| 295 | + self.allocation_history.push(result.clone()); | |
| 296 | + results.push(result); | |
| 297 | + } | |
| 298 | + } | |
| 299 | + | |
| 300 | + Ok(results) | |
| 301 | + } | |
| 302 | + | |
| 303 | + /// Allocate storage for a specific request | |
| 304 | + pub fn allocate_storage(&self, request: &AllocationRequest) -> Result<AllocationResult> { | |
| 305 | + // Filter eligible nodes | |
| 306 | + let eligible_nodes = self.filter_eligible_nodes(request)?; | |
| 307 | + | |
| 308 | + if eligible_nodes.is_empty() { | |
| 309 | + return Err(anyhow::anyhow!("No eligible nodes available for allocation")); | |
| 310 | + } | |
| 311 | + | |
| 312 | + // Calculate optimal allocation strategy | |
| 313 | + let strategy = self.determine_allocation_strategy(request, &eligible_nodes); | |
| 314 | + | |
| 315 | + // Perform allocation based on strategy | |
| 316 | + let allocations = match strategy { | |
| 317 | + AllocationStrategy::CapacityBased => self.allocate_by_capacity(request, &eligible_nodes)?, | |
| 318 | + AllocationStrategy::PerformanceBased => self.allocate_by_performance(request, &eligible_nodes)?, | |
| 319 | + AllocationStrategy::GeographicDistribution => self.allocate_by_geography(request, &eligible_nodes)?, | |
| 320 | + AllocationStrategy::LoadBalancing => self.allocate_by_load_balancing(request, &eligible_nodes)?, | |
| 321 | + AllocationStrategy::Hybrid => self.allocate_hybrid(request, &eligible_nodes)?, | |
| 322 | + }; | |
| 323 | + | |
| 324 | + let total_allocated_gb = allocations.iter().map(|a| a.allocated_gb).sum(); | |
| 325 | + let quality_score = self.calculate_allocation_quality(&allocations, request); | |
| 326 | + | |
| 327 | + let current_time = SystemTime::now() | |
| 328 | + .duration_since(UNIX_EPOCH) | |
| 329 | + .context("Failed to get timestamp")? | |
| 330 | + .as_secs(); | |
| 331 | + | |
| 332 | + Ok(AllocationResult { | |
| 333 | + request_id: request.request_id, | |
| 334 | + allocations, | |
| 335 | + total_allocated_gb, | |
| 336 | + quality_score, | |
| 337 | + strategy, | |
| 338 | + allocated_at: current_time, | |
| 339 | + }) | |
| 340 | + } | |
| 341 | + | |
| 342 | + /// Get network allocation statistics | |
| 343 | + pub fn get_allocation_stats(&self) -> AllocationStats { | |
| 344 | + let total_capacity_gb = self.volunteer_nodes.values() | |
| 345 | + .map(|n| n.total_capacity_gb) | |
| 346 | + .sum(); | |
| 347 | + | |
| 348 | + let total_allocated_gb = self.volunteer_nodes.values() | |
| 349 | + .map(|n| n.allocated_space_gb) | |
| 350 | + .sum(); | |
| 351 | + | |
| 352 | + let total_used_gb = self.volunteer_nodes.values() | |
| 353 | + .map(|n| n.used_space_gb) | |
| 354 | + .sum(); | |
| 355 | + | |
| 356 | + let utilization = if total_capacity_gb > 0.0 { | |
| 357 | + total_used_gb / total_capacity_gb | |
| 358 | + } else { | |
| 359 | + 0.0 | |
| 360 | + }; | |
| 361 | + | |
| 362 | + let active_nodes = self.volunteer_nodes.values() | |
| 363 | + .filter(|n| matches!(n.status, NodeStatus::Active)) | |
| 364 | + .count(); | |
| 365 | + | |
| 366 | + let avg_quality_score = if !self.allocation_history.is_empty() { | |
| 367 | + self.allocation_history.iter() | |
| 368 | + .map(|a| a.quality_score) | |
| 369 | + .sum::<f64>() / self.allocation_history.len() as f64 | |
| 370 | + } else { | |
| 371 | + 0.0 | |
| 372 | + }; | |
| 373 | + | |
| 374 | + AllocationStats { | |
| 375 | + total_capacity_gb, | |
| 376 | + total_allocated_gb, | |
| 377 | + total_used_gb, | |
| 378 | + utilization, | |
| 379 | + active_nodes, | |
| 380 | + pending_requests: self.pending_requests.len(), | |
| 381 | + avg_quality_score, | |
| 382 | + } | |
| 383 | + } | |
| 384 | + | |
| 385 | + /// Rebalance allocations across the network | |
| 386 | + pub fn rebalance_network(&mut self) -> Result<Vec<RebalancingAction>> { | |
| 387 | + let stats = self.get_allocation_stats(); | |
| 388 | + let mut actions = Vec::new(); | |
| 389 | + | |
| 390 | + // Check if rebalancing is needed | |
| 391 | + if stats.utilization > self.config.target_utilization + 0.1 || | |
| 392 | + stats.utilization < self.config.target_utilization - 0.2 { | |
| 393 | + | |
| 394 | + // Calculate optimal rebalancing moves | |
| 395 | + let rebalancing_plan = self.calculate_rebalancing_plan(&stats)?; | |
| 396 | + | |
| 397 | + for action in rebalancing_plan { | |
| 398 | + actions.push(action); | |
| 399 | + } | |
| 400 | + } | |
| 401 | + | |
| 402 | + Ok(actions) | |
| 403 | + } | |
| 404 | + | |
| 405 | + /// Filter nodes eligible for a request | |
| 406 | + fn filter_eligible_nodes(&self, request: &AllocationRequest) -> Result<Vec<&VolunteerNode>> { | |
| 407 | + Ok(self.volunteer_nodes.values() | |
| 408 | + .filter(|node| { | |
| 409 | + // Basic eligibility checks | |
| 410 | + matches!(node.status, NodeStatus::Active) && | |
| 411 | + node.total_capacity_gb - node.allocated_space_gb >= 0.1 && // At least 100MB free | |
| 412 | + node.performance.uptime_percentage >= 0.9 && // At least 90% uptime | |
| 413 | + self.meets_performance_requirements(node, &request.performance_requirements) | |
| 414 | + }) | |
| 415 | + .collect()) | |
| 416 | + } | |
| 417 | + | |
| 418 | + /// Check if node meets performance requirements | |
| 419 | + fn meets_performance_requirements(&self, node: &VolunteerNode, reqs: &PerformanceRequirements) -> bool { | |
| 420 | + if let Some(max_latency) = reqs.max_latency_ms { | |
| 421 | + if node.performance.avg_response_time_ms > max_latency { | |
| 422 | + return false; | |
| 423 | + } | |
| 424 | + } | |
| 425 | + | |
| 426 | + if let Some(min_bandwidth) = reqs.min_bandwidth_mbps { | |
| 427 | + if node.performance.bandwidth_mbps < min_bandwidth { | |
| 428 | + return false; | |
| 429 | + } | |
| 430 | + } | |
| 431 | + | |
| 432 | + if let Some(min_uptime) = reqs.min_uptime { | |
| 433 | + if node.performance.uptime_percentage < min_uptime { | |
| 434 | + return false; | |
| 435 | + } | |
| 436 | + } | |
| 437 | + | |
| 438 | + true | |
| 439 | + } | |
| 440 | + | |
| 441 | + /// Determine optimal allocation strategy | |
| 442 | + fn determine_allocation_strategy( | |
| 443 | + &self, | |
| 444 | + request: &AllocationRequest, | |
| 445 | + eligible_nodes: &[&VolunteerNode], | |
| 446 | + ) -> AllocationStrategy { | |
| 447 | + // Simple heuristic-based strategy selection | |
| 448 | + if request.performance_requirements.min_bandwidth_mbps.is_some() || | |
| 449 | + request.performance_requirements.max_latency_ms.is_some() { | |
| 450 | + AllocationStrategy::PerformanceBased | |
| 451 | + } else if request.geo_preference.is_some() { | |
| 452 | + AllocationStrategy::GeographicDistribution | |
| 453 | + } else if eligible_nodes.len() > 10 { | |
| 454 | + AllocationStrategy::LoadBalancing | |
| 455 | + } else { | |
| 456 | + AllocationStrategy::Hybrid | |
| 457 | + } | |
| 458 | + } | |
| 459 | + | |
| 460 | + /// Allocate storage based on node capacity | |
| 461 | + fn allocate_by_capacity( | |
| 462 | + &self, | |
| 463 | + request: &AllocationRequest, | |
| 464 | + eligible_nodes: &[&VolunteerNode], | |
| 465 | + ) -> Result<Vec<NodeAllocation>> { | |
| 466 | + let mut allocations = Vec::new(); | |
| 467 | + let mut remaining_gb = request.requested_gb; | |
| 468 | + | |
| 469 | + // Sort nodes by available capacity (descending) | |
| 470 | + let mut sorted_nodes: Vec<_> = eligible_nodes.iter().collect(); | |
| 471 | + sorted_nodes.sort_by(|a, b| { | |
| 472 | + let a_available = a.total_capacity_gb - a.allocated_space_gb; | |
| 473 | + let b_available = b.total_capacity_gb - b.allocated_space_gb; | |
| 474 | + b_available.partial_cmp(&a_available).unwrap() | |
| 475 | + }); | |
| 476 | + | |
| 477 | + for node in sorted_nodes { | |
| 478 | + if remaining_gb <= 0.0 { | |
| 479 | + break; | |
| 480 | + } | |
| 481 | + | |
| 482 | + let available_gb = node.total_capacity_gb - node.allocated_space_gb; | |
| 483 | + let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb); | |
| 484 | + | |
| 485 | + if to_allocate >= self.config.min_allocation_gb { | |
| 486 | + allocations.push(NodeAllocation { | |
| 487 | + node_id: node.node_id, | |
| 488 | + allocated_gb: to_allocate, | |
| 489 | + reward_amount: to_allocate * 0.01, // $0.01 per GB | |
| 490 | + priority: request.priority, | |
| 491 | + performance_score: self.calculate_node_performance_score(node), | |
| 492 | + }); | |
| 493 | + | |
| 494 | + remaining_gb -= to_allocate; | |
| 495 | + } | |
| 496 | + } | |
| 497 | + | |
| 498 | + Ok(allocations) | |
| 499 | + } | |
| 500 | + | |
| 501 | + /// Allocate storage based on node performance | |
| 502 | + fn allocate_by_performance( | |
| 503 | + &self, | |
| 504 | + request: &AllocationRequest, | |
| 505 | + eligible_nodes: &[&VolunteerNode], | |
| 506 | + ) -> Result<Vec<NodeAllocation>> { | |
| 507 | + let mut allocations = Vec::new(); | |
| 508 | + let mut remaining_gb = request.requested_gb; | |
| 509 | + | |
| 510 | + // Sort nodes by performance score (descending) | |
| 511 | + let mut sorted_nodes: Vec<_> = eligible_nodes.iter() | |
| 512 | + .map(|node| (node, self.calculate_node_performance_score(node))) | |
| 513 | + .collect(); | |
| 514 | + sorted_nodes.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); | |
| 515 | + | |
| 516 | + for (node, performance_score) in sorted_nodes { | |
| 517 | + if remaining_gb <= 0.0 { | |
| 518 | + break; | |
| 519 | + } | |
| 520 | + | |
| 521 | + let available_gb = node.total_capacity_gb - node.allocated_space_gb; | |
| 522 | + let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb); | |
| 523 | + | |
| 524 | + if to_allocate >= self.config.min_allocation_gb { | |
| 525 | + allocations.push(NodeAllocation { | |
| 526 | + node_id: node.node_id, | |
| 527 | + allocated_gb: to_allocate, | |
| 528 | + reward_amount: to_allocate * 0.015 * performance_score, // Performance bonus | |
| 529 | + priority: request.priority, | |
| 530 | + performance_score, | |
| 531 | + }); | |
| 532 | + | |
| 533 | + remaining_gb -= to_allocate; | |
| 534 | + } | |
| 535 | + } | |
| 536 | + | |
| 537 | + Ok(allocations) | |
| 538 | + } | |
| 539 | + | |
| 540 | + /// Allocate storage based on geographic distribution | |
| 541 | + fn allocate_by_geography( | |
| 542 | + &self, | |
| 543 | + request: &AllocationRequest, | |
| 544 | + eligible_nodes: &[&VolunteerNode], | |
| 545 | + ) -> Result<Vec<NodeAllocation>> { | |
| 546 | + // For now, fall back to capacity-based allocation | |
| 547 | + // In a real implementation, this would consider geographic distribution | |
| 548 | + self.allocate_by_capacity(request, eligible_nodes) | |
| 549 | + } | |
| 550 | + | |
| 551 | + /// Allocate storage using load balancing | |
| 552 | + fn allocate_by_load_balancing( | |
| 553 | + &self, | |
| 554 | + request: &AllocationRequest, | |
| 555 | + eligible_nodes: &[&VolunteerNode], | |
| 556 | + ) -> Result<Vec<NodeAllocation>> { | |
| 557 | + let mut allocations = Vec::new(); | |
| 558 | + let mut remaining_gb = request.requested_gb; | |
| 559 | + | |
| 560 | + // Sort nodes by current utilization (ascending) | |
| 561 | + let mut sorted_nodes: Vec<_> = eligible_nodes.iter().collect(); | |
| 562 | + sorted_nodes.sort_by(|a, b| { | |
| 563 | + let a_utilization = a.used_space_gb / a.total_capacity_gb; | |
| 564 | + let b_utilization = b.used_space_gb / b.total_capacity_gb; | |
| 565 | + a_utilization.partial_cmp(&b_utilization).unwrap() | |
| 566 | + }); | |
| 567 | + | |
| 568 | + for node in sorted_nodes { | |
| 569 | + if remaining_gb <= 0.0 { | |
| 570 | + break; | |
| 571 | + } | |
| 572 | + | |
| 573 | + let available_gb = node.total_capacity_gb - node.allocated_space_gb; | |
| 574 | + let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb); | |
| 575 | + | |
| 576 | + if to_allocate >= self.config.min_allocation_gb { | |
| 577 | + allocations.push(NodeAllocation { | |
| 578 | + node_id: node.node_id, | |
| 579 | + allocated_gb: to_allocate, | |
| 580 | + reward_amount: to_allocate * 0.01, | |
| 581 | + priority: request.priority, | |
| 582 | + performance_score: self.calculate_node_performance_score(node), | |
| 583 | + }); | |
| 584 | + | |
| 585 | + remaining_gb -= to_allocate; | |
| 586 | + } | |
| 587 | + } | |
| 588 | + | |
| 589 | + Ok(allocations) | |
| 590 | + } | |
| 591 | + | |
| 592 | + /// Allocate storage using hybrid approach | |
| 593 | + fn allocate_hybrid( | |
| 594 | + &self, | |
| 595 | + request: &AllocationRequest, | |
| 596 | + eligible_nodes: &[&VolunteerNode], | |
| 597 | + ) -> Result<Vec<NodeAllocation>> { | |
| 598 | + // Hybrid scoring based on capacity, performance, and load | |
| 599 | + let mut node_scores: Vec<_> = eligible_nodes.iter() | |
| 600 | + .map(|node| { | |
| 601 | + let available_gb = node.total_capacity_gb - node.allocated_space_gb; | |
| 602 | + let utilization = node.used_space_gb / node.total_capacity_gb; | |
| 603 | + let performance_score = self.calculate_node_performance_score(node); | |
| 604 | + | |
| 605 | + let capacity_score = available_gb / self.config.max_allocation_gb; | |
| 606 | + let load_score = 1.0 - utilization; | |
| 607 | + | |
| 608 | + let hybrid_score = | |
| 609 | + self.config.capacity_weight * capacity_score + | |
| 610 | + self.config.performance_weight * performance_score + | |
| 611 | + (1.0 - self.config.capacity_weight - self.config.performance_weight) * load_score; | |
| 612 | + | |
| 613 | + (node, hybrid_score) | |
| 614 | + }) | |
| 615 | + .collect(); | |
| 616 | + | |
| 617 | + // Sort by hybrid score (descending) | |
| 618 | + node_scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap()); | |
| 619 | + | |
| 620 | + let mut allocations = Vec::new(); | |
| 621 | + let mut remaining_gb = request.requested_gb; | |
| 622 | + | |
| 623 | + for (node, score) in node_scores { | |
| 624 | + if remaining_gb <= 0.0 { | |
| 625 | + break; | |
| 626 | + } | |
| 627 | + | |
| 628 | + let available_gb = node.total_capacity_gb - node.allocated_space_gb; | |
| 629 | + let to_allocate = remaining_gb.min(available_gb).min(self.config.max_allocation_gb); | |
| 630 | + | |
| 631 | + if to_allocate >= self.config.min_allocation_gb { | |
| 632 | + allocations.push(NodeAllocation { | |
| 633 | + node_id: node.node_id, | |
| 634 | + allocated_gb: to_allocate, | |
| 635 | + reward_amount: to_allocate * 0.012 * score, // Score-based reward | |
| 636 | + priority: request.priority, | |
| 637 | + performance_score: self.calculate_node_performance_score(node), | |
| 638 | + }); | |
| 639 | + | |
| 640 | + remaining_gb -= to_allocate; | |
| 641 | + } | |
| 642 | + } | |
| 643 | + | |
| 644 | + Ok(allocations) | |
| 645 | + } | |
| 646 | + | |
| 647 | + /// Calculate node performance score | |
| 648 | + fn calculate_node_performance_score(&self, node: &VolunteerNode) -> f64 { | |
| 649 | + let latency_score = (1000.0 - node.performance.avg_response_time_ms).max(0.0) / 1000.0; | |
| 650 | + let bandwidth_score = (node.performance.bandwidth_mbps / 100.0).min(1.0); | |
| 651 | + let uptime_score = node.performance.uptime_percentage; | |
| 652 | + let reliability_score = node.reliability_score; | |
| 653 | + let error_score = 1.0 - node.performance.error_rate; | |
| 654 | + | |
| 655 | + (latency_score + bandwidth_score + uptime_score + reliability_score + error_score) / 5.0 | |
| 656 | + } | |
| 657 | + | |
| 658 | + /// Calculate quality score for an allocation | |
| 659 | + fn calculate_allocation_quality(&self, allocations: &[NodeAllocation], request: &AllocationRequest) -> f64 { | |
| 660 | + if allocations.is_empty() { | |
| 661 | + return 0.0; | |
| 662 | + } | |
| 663 | + | |
| 664 | + let total_allocated = allocations.iter().map(|a| a.allocated_gb).sum::<f64>(); | |
| 665 | + let fulfillment_ratio = (total_allocated / request.requested_gb).min(1.0); | |
| 666 | + | |
| 667 | + let avg_performance = allocations.iter() | |
| 668 | + .map(|a| a.performance_score) | |
| 669 | + .sum::<f64>() / allocations.len() as f64; | |
| 670 | + | |
| 671 | + let diversity_bonus = if allocations.len() > 1 { 0.1 } else { 0.0 }; | |
| 672 | + | |
| 673 | + (fulfillment_ratio * 0.6 + avg_performance * 0.3 + diversity_bonus).min(1.0) | |
| 674 | + } | |
| 675 | + | |
| 676 | + /// Apply updates to a volunteer node | |
| 677 | + fn apply_node_updates(&self, node: &mut VolunteerNode, updates: VolunteerNodeUpdate) { | |
| 678 | + if let Some(capacity) = updates.total_capacity_gb { | |
| 679 | + node.total_capacity_gb = capacity; | |
| 680 | + } | |
| 681 | + if let Some(used) = updates.used_space_gb { | |
| 682 | + node.used_space_gb = used; | |
| 683 | + } | |
| 684 | + if let Some(performance) = updates.performance { | |
| 685 | + node.performance = performance; | |
| 686 | + } | |
| 687 | + if let Some(status) = updates.status { | |
| 688 | + node.status = status; | |
| 689 | + } | |
| 690 | + } | |
| 691 | + | |
| 692 | + /// Calculate rebalancing plan | |
| 693 | + fn calculate_rebalancing_plan(&self, _stats: &AllocationStats) -> Result<Vec<RebalancingAction>> { | |
| 694 | + // Simplified rebalancing - in production this would be more sophisticated | |
| 695 | + Ok(vec![]) | |
| 696 | + } | |
| 697 | +} | |
| 698 | + | |
| 699 | +/// Updates for volunteer node information | |
| 700 | +#[derive(Debug, Clone)] | |
| 701 | +pub struct VolunteerNodeUpdate { | |
| 702 | + pub total_capacity_gb: Option<f64>, | |
| 703 | + pub used_space_gb: Option<f64>, | |
| 704 | + pub performance: Option<NodePerformance>, | |
| 705 | + pub status: Option<NodeStatus>, | |
| 706 | +} | |
| 707 | + | |
| 708 | +/// Rebalancing action | |
| 709 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 710 | +pub enum RebalancingAction { | |
| 711 | + MoveAllocation { | |
| 712 | + from_node: Uuid, | |
| 713 | + to_node: Uuid, | |
| 714 | + amount_gb: f64, | |
| 715 | + }, | |
| 716 | + ScaleUp { | |
| 717 | + node_id: Uuid, | |
| 718 | + additional_gb: f64, | |
| 719 | + }, | |
| 720 | + ScaleDown { | |
| 721 | + node_id: Uuid, | |
| 722 | + reduction_gb: f64, | |
| 723 | + }, | |
| 724 | +} | |
| 725 | + | |
| 726 | +#[cfg(test)] | |
| 727 | +mod tests { | |
| 728 | + use super::*; | |
| 729 | + | |
| 730 | + fn create_test_node(node_id: Uuid, capacity_gb: f64) -> VolunteerNode { | |
| 731 | + VolunteerNode { | |
| 732 | + node_id, | |
| 733 | + total_capacity_gb: capacity_gb, | |
| 734 | + allocated_space_gb: 0.0, | |
| 735 | + used_space_gb: 0.0, | |
| 736 | + performance: NodePerformance { | |
| 737 | + avg_response_time_ms: 50.0, | |
| 738 | + bandwidth_mbps: 100.0, | |
| 739 | + uptime_percentage: 0.99, | |
| 740 | + error_rate: 0.01, | |
| 741 | + cpu_usage: 0.3, | |
| 742 | + memory_usage: 0.4, | |
| 743 | + }, | |
| 744 | + preferences: NodePreferences { | |
| 745 | + max_contribution_gb: capacity_gb, | |
| 746 | + preferred_hours: None, | |
| 747 | + bandwidth_limit_mbps: None, | |
| 748 | + content_preferences: vec![], | |
| 749 | + min_reward_rate: None, | |
| 750 | + }, | |
| 751 | + location: None, | |
| 752 | + reliability_score: 0.95, | |
| 753 | + last_seen: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(), | |
| 754 | + status: NodeStatus::Active, | |
| 755 | + } | |
| 756 | + } | |
| 757 | + | |
| 758 | + #[test] | |
| 759 | + fn test_democratic_allocation_basic() -> Result<()> { | |
| 760 | + let config = AllocationConfig::default(); | |
| 761 | + let mut allocator = DemocraticAllocator::new(config); | |
| 762 | + | |
| 763 | + // Register some volunteer nodes | |
| 764 | + let node1 = create_test_node(Uuid::new_v4(), 10.0); | |
| 765 | + let node2 = create_test_node(Uuid::new_v4(), 20.0); | |
| 766 | + let node3 = create_test_node(Uuid::new_v4(), 15.0); | |
| 767 | + | |
| 768 | + allocator.register_volunteer(node1)?; | |
| 769 | + allocator.register_volunteer(node2)?; | |
| 770 | + allocator.register_volunteer(node3)?; | |
| 771 | + | |
| 772 | + // Create allocation request | |
| 773 | + let request = AllocationRequest { | |
| 774 | + request_id: Uuid::new_v4(), | |
| 775 | + user_id: "test-user".to_string(), | |
| 776 | + requested_gb: 5.0, | |
| 777 | + priority: 5, | |
| 778 | + content_type: None, | |
| 779 | + geo_preference: None, | |
| 780 | + performance_requirements: PerformanceRequirements { | |
| 781 | + max_latency_ms: None, | |
| 782 | + min_bandwidth_mbps: None, | |
| 783 | + min_uptime: None, | |
| 784 | + redundancy_factor: 1, | |
| 785 | + }, | |
| 786 | + created_at: 0, // Will be set by request_allocation | |
| 787 | + }; | |
| 788 | + | |
| 789 | + let request_id = allocator.request_allocation(request)?; | |
| 790 | + let results = allocator.process_allocations()?; | |
| 791 | + | |
| 792 | + assert!(!results.is_empty()); | |
| 793 | + assert_eq!(results[0].request_id, request_id); | |
| 794 | + assert!(results[0].total_allocated_gb > 0.0); | |
| 795 | + | |
| 796 | + Ok(()) | |
| 797 | + } | |
| 798 | + | |
| 799 | + #[test] | |
| 800 | + fn test_allocation_stats() { | |
| 801 | + let config = AllocationConfig::default(); | |
| 802 | + let mut allocator = DemocraticAllocator::new(config); | |
| 803 | + | |
| 804 | + // Add some nodes | |
| 805 | + allocator.register_volunteer(create_test_node(Uuid::new_v4(), 10.0)).unwrap(); | |
| 806 | + allocator.register_volunteer(create_test_node(Uuid::new_v4(), 20.0)).unwrap(); | |
| 807 | + | |
| 808 | + let stats = allocator.get_allocation_stats(); | |
| 809 | + | |
| 810 | + assert_eq!(stats.total_capacity_gb, 30.0); | |
| 811 | + assert_eq!(stats.active_nodes, 2); | |
| 812 | + assert_eq!(stats.utilization, 0.0); // No usage yet | |
| 813 | + } | |
| 814 | + | |
| 815 | + #[test] | |
| 816 | + fn test_node_performance_score() { | |
| 817 | + let config = AllocationConfig::default(); | |
| 818 | + let allocator = DemocraticAllocator::new(config); | |
| 819 | + let node = create_test_node(Uuid::new_v4(), 10.0); | |
| 820 | + | |
| 821 | + let score = allocator.calculate_node_performance_score(&node); | |
| 822 | + assert!(score > 0.8); // Should be high score for good test node | |
| 823 | + assert!(score <= 1.0); | |
| 824 | + } | |
| 825 | +} | |
src/economics/earnings_calculator.rsadded@@ -0,0 +1,616 @@ | ||
| 1 | +//! Real-Time Earnings Calculation System | |
| 2 | +//! | |
| 3 | +//! Comprehensive earnings tracking and calculation for ZephyrFS volunteers | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::{HashMap, VecDeque}; | |
| 8 | +use chrono::{DateTime, Utc, Duration}; | |
| 9 | + | |
| 10 | +use super::token_model::{RewardReason, NetworkHealthMetrics}; | |
| 11 | + | |
| 12 | +/// Real-time earnings calculator for volunteers | |
| 13 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 14 | +pub struct EarningsCalculator { | |
| 15 | + /// Volunteer performance tracking | |
| 16 | + pub volunteer_metrics: HashMap<String, VolunteerMetrics>, | |
| 17 | + /// Earnings rates configuration | |
| 18 | + pub rates: EarningsRates, | |
| 19 | + /// Bonus multipliers | |
| 20 | + pub bonuses: BonusStructure, | |
| 21 | + /// Performance history for analytics | |
| 22 | + pub performance_history: HashMap<String, VecDeque<PerformanceRecord>>, | |
| 23 | + /// Network-wide metrics | |
| 24 | + pub network_metrics: NetworkHealthMetrics, | |
| 25 | + /// Daily earnings summary | |
| 26 | + pub daily_earnings: HashMap<String, DailyEarnings>, | |
| 27 | +} | |
| 28 | + | |
| 29 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 30 | +pub struct VolunteerMetrics { | |
| 31 | + pub volunteer_id: String, | |
| 32 | + pub total_storage_gb: u64, | |
| 33 | + pub available_storage_gb: u64, | |
| 34 | + pub used_storage_gb: u64, | |
| 35 | + pub uptime_hours_24h: f64, | |
| 36 | + pub uptime_percentage: f64, | |
| 37 | + pub response_time_ms: u64, | |
| 38 | + pub transfer_speed_mbps: f64, | |
| 39 | + pub successful_transfers: u64, | |
| 40 | + pub failed_transfers: u64, | |
| 41 | + pub geographic_region: GeographicRegion, | |
| 42 | + pub connection_quality: ConnectionQuality, | |
| 43 | + pub reliability_score: f64, | |
| 44 | + pub last_seen: DateTime<Utc>, | |
| 45 | + pub joined_at: DateTime<Utc>, | |
| 46 | +} | |
| 47 | + | |
| 48 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 49 | +pub struct EarningsRates { | |
| 50 | + /// Base rate per GB per day (in wei-equivalent tokens) | |
| 51 | + pub base_storage_rate: u64, | |
| 52 | + /// Uptime bonus rate (per hour of 100% uptime) | |
| 53 | + pub uptime_bonus_rate: u64, | |
| 54 | + /// Performance bonus rate (based on speed/reliability) | |
| 55 | + pub performance_bonus_rate: u64, | |
| 56 | + /// Geographic diversity bonus | |
| 57 | + pub geographic_bonus_rate: u64, | |
| 58 | + /// Longevity bonus (tenure rewards) | |
| 59 | + pub longevity_bonus_rate: u64, | |
| 60 | + /// Network contribution bonus | |
| 61 | + pub network_contribution_rate: u64, | |
| 62 | +} | |
| 63 | + | |
| 64 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 65 | +pub struct BonusStructure { | |
| 66 | + /// Uptime thresholds and multipliers | |
| 67 | + pub uptime_tiers: Vec<UptimeTier>, | |
| 68 | + /// Performance score multipliers | |
| 69 | + pub performance_multipliers: PerformanceMultipliers, | |
| 70 | + /// Geographic diversity bonuses | |
| 71 | + pub geographic_bonuses: HashMap<GeographicRegion, f64>, | |
| 72 | + /// Tenure bonuses (loyalty rewards) | |
| 73 | + pub tenure_bonuses: Vec<TenureTier>, | |
| 74 | + /// Network health bonuses | |
| 75 | + pub network_health_bonus: f64, | |
| 76 | +} | |
| 77 | + | |
| 78 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 79 | +pub struct UptimeTier { | |
| 80 | + pub threshold_percent: f64, | |
| 81 | + pub multiplier: f64, | |
| 82 | + pub name: String, | |
| 83 | +} | |
| 84 | + | |
| 85 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 86 | +pub struct PerformanceMultipliers { | |
| 87 | + pub excellent_speed: f64, // >100 Mbps | |
| 88 | + pub good_speed: f64, // 50-100 Mbps | |
| 89 | + pub average_speed: f64, // 10-50 Mbps | |
| 90 | + pub low_response_time: f64, // <100ms | |
| 91 | + pub high_reliability: f64, // >99% success rate | |
| 92 | +} | |
| 93 | + | |
| 94 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 95 | +pub struct TenureTier { | |
| 96 | + pub months: u32, | |
| 97 | + pub multiplier: f64, | |
| 98 | + pub name: String, | |
| 99 | +} | |
| 100 | + | |
| 101 | +#[derive(Debug, Clone, PartialEq, Hash, Serialize, Deserialize)] | |
| 102 | +pub enum GeographicRegion { | |
| 103 | + NorthAmerica, | |
| 104 | + Europe, | |
| 105 | + Asia, | |
| 106 | + SouthAmerica, | |
| 107 | + Africa, | |
| 108 | + Oceania, | |
| 109 | + Rare, // Underrepresented regions | |
| 110 | +} | |
| 111 | + | |
| 112 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 113 | +pub enum ConnectionQuality { | |
| 114 | + Excellent, // Fiber, low latency | |
| 115 | + Good, // Broadband, stable | |
| 116 | + Fair, // Adequate speed | |
| 117 | + Poor, // Slow or unstable | |
| 118 | +} | |
| 119 | + | |
| 120 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 121 | +pub struct PerformanceRecord { | |
| 122 | + pub timestamp: DateTime<Utc>, | |
| 123 | + pub uptime_hours: f64, | |
| 124 | + pub storage_provided_gb: u64, | |
| 125 | + pub transfer_speed_mbps: f64, | |
| 126 | + pub response_time_ms: u64, | |
| 127 | + pub success_rate: f64, | |
| 128 | + pub earnings_tokens: u64, | |
| 129 | + pub bonus_tokens: u64, | |
| 130 | +} | |
| 131 | + | |
| 132 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 133 | +pub struct DailyEarnings { | |
| 134 | + pub date: DateTime<Utc>, | |
| 135 | + pub base_earnings: u64, | |
| 136 | + pub uptime_bonus: u64, | |
| 137 | + pub performance_bonus: u64, | |
| 138 | + pub geographic_bonus: u64, | |
| 139 | + pub longevity_bonus: u64, | |
| 140 | + pub network_bonus: u64, | |
| 141 | + pub total_earnings: u64, | |
| 142 | + pub storage_gb_hours: u64, | |
| 143 | + pub actual_uptime_hours: f64, | |
| 144 | +} | |
| 145 | + | |
| 146 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 147 | +pub struct EarningsProjection { | |
| 148 | + pub daily_estimate: u64, | |
| 149 | + pub weekly_estimate: u64, | |
| 150 | + pub monthly_estimate: u64, | |
| 151 | + pub annual_estimate: u64, | |
| 152 | + pub factors: ProjectionFactors, | |
| 153 | +} | |
| 154 | + | |
| 155 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 156 | +pub struct ProjectionFactors { | |
| 157 | + pub current_performance: f64, | |
| 158 | + pub network_demand: f64, | |
| 159 | + pub seasonal_adjustment: f64, | |
| 160 | + pub growth_factor: f64, | |
| 161 | +} | |
| 162 | + | |
| 163 | +impl Default for EarningsRates { | |
| 164 | + fn default() -> Self { | |
| 165 | + Self { | |
| 166 | + base_storage_rate: 20_000_000_000_000_000, // 0.02 tokens per GB per day | |
| 167 | + uptime_bonus_rate: 1_000_000_000_000_000, // 0.001 tokens per hour | |
| 168 | + performance_bonus_rate: 5_000_000_000_000_000, // 0.005 tokens for high performance | |
| 169 | + geographic_bonus_rate: 3_000_000_000_000_000, // 0.003 tokens for rare regions | |
| 170 | + longevity_bonus_rate: 2_000_000_000_000_000, // 0.002 tokens longevity bonus | |
| 171 | + network_contribution_rate: 1_000_000_000_000_000, // 0.001 tokens network contribution | |
| 172 | + } | |
| 173 | + } | |
| 174 | +} | |
| 175 | + | |
| 176 | +impl Default for BonusStructure { | |
| 177 | + fn default() -> Self { | |
| 178 | + Self { | |
| 179 | + uptime_tiers: vec![ | |
| 180 | + UptimeTier { threshold_percent: 99.5, multiplier: 2.0, name: "Platinum".to_string() }, | |
| 181 | + UptimeTier { threshold_percent: 98.0, multiplier: 1.5, name: "Gold".to_string() }, | |
| 182 | + UptimeTier { threshold_percent: 95.0, multiplier: 1.2, name: "Silver".to_string() }, | |
| 183 | + UptimeTier { threshold_percent: 90.0, multiplier: 1.0, name: "Bronze".to_string() }, | |
| 184 | + ], | |
| 185 | + performance_multipliers: PerformanceMultipliers { | |
| 186 | + excellent_speed: 1.3, | |
| 187 | + good_speed: 1.1, | |
| 188 | + average_speed: 1.0, | |
| 189 | + low_response_time: 1.2, | |
| 190 | + high_reliability: 1.25, | |
| 191 | + }, | |
| 192 | + geographic_bonuses: HashMap::from([ | |
| 193 | + (GeographicRegion::Rare, 0.5), | |
| 194 | + (GeographicRegion::Africa, 0.3), | |
| 195 | + (GeographicRegion::SouthAmerica, 0.2), | |
| 196 | + (GeographicRegion::Oceania, 0.2), | |
| 197 | + (GeographicRegion::Asia, 0.1), | |
| 198 | + (GeographicRegion::Europe, 0.05), | |
| 199 | + (GeographicRegion::NorthAmerica, 0.0), | |
| 200 | + ]), | |
| 201 | + tenure_bonuses: vec![ | |
| 202 | + TenureTier { months: 24, multiplier: 1.5, name: "Veteran".to_string() }, | |
| 203 | + TenureTier { months: 12, multiplier: 1.3, name: "Experienced".to_string() }, | |
| 204 | + TenureTier { months: 6, multiplier: 1.15, name: "Established".to_string() }, | |
| 205 | + TenureTier { months: 3, multiplier: 1.05, name: "Regular".to_string() }, | |
| 206 | + ], | |
| 207 | + network_health_bonus: 0.1, // 10% bonus when network is healthy | |
| 208 | + } | |
| 209 | + } | |
| 210 | +} | |
| 211 | + | |
| 212 | +impl EarningsCalculator { | |
| 213 | + /// Create new earnings calculator | |
| 214 | + pub fn new() -> Self { | |
| 215 | + Self { | |
| 216 | + volunteer_metrics: HashMap::new(), | |
| 217 | + rates: EarningsRates::default(), | |
| 218 | + bonuses: BonusStructure::default(), | |
| 219 | + performance_history: HashMap::new(), | |
| 220 | + network_metrics: NetworkHealthMetrics { | |
| 221 | + total_capacity_gb: 0, | |
| 222 | + active_volunteers: 0, | |
| 223 | + utilization_rate: 0.0, | |
| 224 | + average_uptime: 0.0, | |
| 225 | + geographic_diversity: 0.0, | |
| 226 | + data_durability: 0.0, | |
| 227 | + }, | |
| 228 | + daily_earnings: HashMap::new(), | |
| 229 | + } | |
| 230 | + } | |
| 231 | + | |
| 232 | + /// Update volunteer metrics | |
| 233 | + pub fn update_volunteer_metrics(&mut self, metrics: VolunteerMetrics) { | |
| 234 | + let volunteer_id = metrics.volunteer_id.clone(); | |
| 235 | + | |
| 236 | + // Record performance history | |
| 237 | + let record = PerformanceRecord { | |
| 238 | + timestamp: Utc::now(), | |
| 239 | + uptime_hours: metrics.uptime_hours_24h, | |
| 240 | + storage_provided_gb: metrics.used_storage_gb, | |
| 241 | + transfer_speed_mbps: metrics.transfer_speed_mbps, | |
| 242 | + response_time_ms: metrics.response_time_ms, | |
| 243 | + success_rate: if metrics.successful_transfers + metrics.failed_transfers > 0 { | |
| 244 | + metrics.successful_transfers as f64 / (metrics.successful_transfers + metrics.failed_transfers) as f64 | |
| 245 | + } else { | |
| 246 | + 1.0 | |
| 247 | + }, | |
| 248 | + earnings_tokens: 0, // Will be calculated | |
| 249 | + bonus_tokens: 0, | |
| 250 | + }; | |
| 251 | + | |
| 252 | + self.performance_history | |
| 253 | + .entry(volunteer_id.clone()) | |
| 254 | + .or_insert_with(|| VecDeque::with_capacity(720)) // 30 days of hourly records | |
| 255 | + .push_back(record); | |
| 256 | + | |
| 257 | + // Keep only last 30 days | |
| 258 | + if let Some(history) = self.performance_history.get_mut(&volunteer_id) { | |
| 259 | + while history.len() > 720 { | |
| 260 | + history.pop_front(); | |
| 261 | + } | |
| 262 | + } | |
| 263 | + | |
| 264 | + self.volunteer_metrics.insert(volunteer_id, metrics); | |
| 265 | + } | |
| 266 | + | |
| 267 | + /// Calculate real-time earnings for a volunteer | |
| 268 | + pub fn calculate_real_time_earnings(&self, volunteer_id: &str) -> Result<u64> { | |
| 269 | + let metrics = self.volunteer_metrics.get(volunteer_id) | |
| 270 | + .ok_or_else(|| anyhow::anyhow!("Volunteer metrics not found"))?; | |
| 271 | + | |
| 272 | + // Calculate base storage earnings | |
| 273 | + let base_earnings = self.calculate_base_storage_earnings(metrics); | |
| 274 | + | |
| 275 | + // Calculate uptime bonus | |
| 276 | + let uptime_bonus = self.calculate_uptime_bonus(metrics); | |
| 277 | + | |
| 278 | + // Calculate performance bonus | |
| 279 | + let performance_bonus = self.calculate_performance_bonus(metrics); | |
| 280 | + | |
| 281 | + // Calculate geographic bonus | |
| 282 | + let geographic_bonus = self.calculate_geographic_bonus(metrics); | |
| 283 | + | |
| 284 | + // Calculate longevity bonus | |
| 285 | + let longevity_bonus = self.calculate_longevity_bonus(metrics); | |
| 286 | + | |
| 287 | + // Calculate network health bonus | |
| 288 | + let network_bonus = self.calculate_network_health_bonus(base_earnings); | |
| 289 | + | |
| 290 | + let total_earnings = base_earnings + uptime_bonus + performance_bonus | |
| 291 | + + geographic_bonus + longevity_bonus + network_bonus; | |
| 292 | + | |
| 293 | + Ok(total_earnings) | |
| 294 | + } | |
| 295 | + | |
| 296 | + /// Calculate base storage earnings | |
| 297 | + fn calculate_base_storage_earnings(&self, metrics: &VolunteerMetrics) -> u64 { | |
| 298 | + // Earnings based on storage provided and utilization | |
| 299 | + let utilization_factor = if metrics.total_storage_gb > 0 { | |
| 300 | + metrics.used_storage_gb as f64 / metrics.total_storage_gb as f64 | |
| 301 | + } else { | |
| 302 | + 0.0 | |
| 303 | + }; | |
| 304 | + | |
| 305 | + // Base rate * storage * utilization (with minimum guarantee) | |
| 306 | + let base = metrics.total_storage_gb * self.rates.base_storage_rate; | |
| 307 | + let utilization_earnings = (base as f64 * utilization_factor) as u64; | |
| 308 | + | |
| 309 | + // Guarantee at least 20% of base rate even with no utilization | |
| 310 | + let minimum = base / 5; | |
| 311 | + | |
| 312 | + utilization_earnings.max(minimum) | |
| 313 | + } | |
| 314 | + | |
| 315 | + /// Calculate uptime bonus | |
| 316 | + fn calculate_uptime_bonus(&self, metrics: &VolunteerMetrics) -> u64 { | |
| 317 | + let uptime_tier = self.bonuses.uptime_tiers.iter() | |
| 318 | + .find(|tier| metrics.uptime_percentage >= tier.threshold_percent) | |
| 319 | + .unwrap_or(&self.bonuses.uptime_tiers[self.bonuses.uptime_tiers.len() - 1]); | |
| 320 | + | |
| 321 | + let base_bonus = (metrics.uptime_hours_24h * self.rates.uptime_bonus_rate as f64) as u64; | |
| 322 | + (base_bonus as f64 * uptime_tier.multiplier) as u64 | |
| 323 | + } | |
| 324 | + | |
| 325 | + /// Calculate performance bonus | |
| 326 | + fn calculate_performance_bonus(&self, metrics: &VolunteerMetrics) -> u64 { | |
| 327 | + let mut multiplier = 1.0; | |
| 328 | + | |
| 329 | + // Speed bonus | |
| 330 | + if metrics.transfer_speed_mbps >= 100.0 { | |
| 331 | + multiplier *= self.bonuses.performance_multipliers.excellent_speed; | |
| 332 | + } else if metrics.transfer_speed_mbps >= 50.0 { | |
| 333 | + multiplier *= self.bonuses.performance_multipliers.good_speed; | |
| 334 | + } else if metrics.transfer_speed_mbps >= 10.0 { | |
| 335 | + multiplier *= self.bonuses.performance_multipliers.average_speed; | |
| 336 | + } | |
| 337 | + | |
| 338 | + // Response time bonus | |
| 339 | + if metrics.response_time_ms < 100 { | |
| 340 | + multiplier *= self.bonuses.performance_multipliers.low_response_time; | |
| 341 | + } | |
| 342 | + | |
| 343 | + // Reliability bonus | |
| 344 | + let success_rate = if metrics.successful_transfers + metrics.failed_transfers > 0 { | |
| 345 | + metrics.successful_transfers as f64 / (metrics.successful_transfers + metrics.failed_transfers) as f64 | |
| 346 | + } else { | |
| 347 | + 1.0 | |
| 348 | + }; | |
| 349 | + | |
| 350 | + if success_rate >= 0.99 { | |
| 351 | + multiplier *= self.bonuses.performance_multipliers.high_reliability; | |
| 352 | + } | |
| 353 | + | |
| 354 | + (self.rates.performance_bonus_rate as f64 * multiplier) as u64 | |
| 355 | + } | |
| 356 | + | |
| 357 | + /// Calculate geographic diversity bonus | |
| 358 | + fn calculate_geographic_bonus(&self, metrics: &VolunteerMetrics) -> u64 { | |
| 359 | + let bonus_multiplier = self.bonuses.geographic_bonuses | |
| 360 | + .get(&metrics.geographic_region) | |
| 361 | + .copied() | |
| 362 | + .unwrap_or(0.0); | |
| 363 | + | |
| 364 | + (self.rates.geographic_bonus_rate as f64 * (1.0 + bonus_multiplier)) as u64 | |
| 365 | + } | |
| 366 | + | |
| 367 | + /// Calculate longevity bonus | |
| 368 | + fn calculate_longevity_bonus(&self, metrics: &VolunteerMetrics) -> u64 { | |
| 369 | + let months_active = (Utc::now() - metrics.joined_at).num_days() / 30; | |
| 370 | + | |
| 371 | + let tenure_tier = self.bonuses.tenure_bonuses.iter() | |
| 372 | + .find(|tier| months_active >= tier.months as i64) | |
| 373 | + .cloned() | |
| 374 | + .unwrap_or(TenureTier { months: 0, multiplier: 1.0, name: "New".to_string() }); | |
| 375 | + | |
| 376 | + (self.rates.longevity_bonus_rate as f64 * tenure_tier.multiplier) as u64 | |
| 377 | + } | |
| 378 | + | |
| 379 | + /// Calculate network health bonus | |
| 380 | + fn calculate_network_health_bonus(&self, base_earnings: u64) -> u64 { | |
| 381 | + // Bonus when network is performing well | |
| 382 | + let health_score = self.calculate_network_health_score(); | |
| 383 | + | |
| 384 | + if health_score >= 95.0 { | |
| 385 | + (base_earnings as f64 * self.bonuses.network_health_bonus) as u64 | |
| 386 | + } else { | |
| 387 | + 0 | |
| 388 | + } | |
| 389 | + } | |
| 390 | + | |
| 391 | + /// Calculate overall network health score | |
| 392 | + fn calculate_network_health_score(&self) -> f64 { | |
| 393 | + let metrics = &self.network_metrics; | |
| 394 | + | |
| 395 | + // Weighted health score | |
| 396 | + let uptime_score = metrics.average_uptime.min(100.0); | |
| 397 | + let diversity_score = metrics.geographic_diversity.min(100.0); | |
| 398 | + let durability_score = metrics.data_durability.min(100.0); | |
| 399 | + | |
| 400 | + (uptime_score * 0.4 + diversity_score * 0.3 + durability_score * 0.3) | |
| 401 | + } | |
| 402 | + | |
| 403 | + /// Calculate hourly earnings for a volunteer | |
| 404 | + pub fn calculate_hourly_earnings(&self, volunteer_id: &str) -> Result<u64> { | |
| 405 | + let daily_earnings = self.calculate_real_time_earnings(volunteer_id)?; | |
| 406 | + Ok(daily_earnings / 24) // Hourly rate | |
| 407 | + } | |
| 408 | + | |
| 409 | + /// Calculate earnings projection | |
| 410 | + pub fn calculate_earnings_projection(&self, volunteer_id: &str) -> Result<EarningsProjection> { | |
| 411 | + let daily_earnings = self.calculate_real_time_earnings(volunteer_id)?; | |
| 412 | + | |
| 413 | + // Get performance trend | |
| 414 | + let performance_trend = self.calculate_performance_trend(volunteer_id)?; | |
| 415 | + | |
| 416 | + // Network demand factor (based on utilization) | |
| 417 | + let demand_factor = (self.network_metrics.utilization_rate / 100.0).min(1.2); // Cap at 120% | |
| 418 | + | |
| 419 | + // Seasonal adjustment (placeholder - would use historical data) | |
| 420 | + let seasonal_factor = 1.0; | |
| 421 | + | |
| 422 | + // Growth factor based on network expansion | |
| 423 | + let growth_factor = if self.network_metrics.active_volunteers < 100 { | |
| 424 | + 1.2 // Early network bonus | |
| 425 | + } else { | |
| 426 | + 1.0 | |
| 427 | + }; | |
| 428 | + | |
| 429 | + let factors = ProjectionFactors { | |
| 430 | + current_performance: performance_trend, | |
| 431 | + network_demand: demand_factor, | |
| 432 | + seasonal_adjustment: seasonal_factor, | |
| 433 | + growth_factor, | |
| 434 | + }; | |
| 435 | + | |
| 436 | + let adjusted_daily = (daily_earnings as f64 * performance_trend * demand_factor * growth_factor) as u64; | |
| 437 | + | |
| 438 | + Ok(EarningsProjection { | |
| 439 | + daily_estimate: adjusted_daily, | |
| 440 | + weekly_estimate: adjusted_daily * 7, | |
| 441 | + monthly_estimate: adjusted_daily * 30, | |
| 442 | + annual_estimate: adjusted_daily * 365, | |
| 443 | + factors, | |
| 444 | + }) | |
| 445 | + } | |
| 446 | + | |
| 447 | + /// Calculate performance trend for projections | |
| 448 | + fn calculate_performance_trend(&self, volunteer_id: &str) -> Result<f64> { | |
| 449 | + let history = self.performance_history.get(volunteer_id) | |
| 450 | + .ok_or_else(|| anyhow::anyhow!("No performance history found"))?; | |
| 451 | + | |
| 452 | + if history.len() < 2 { | |
| 453 | + return Ok(1.0); // No trend data | |
| 454 | + } | |
| 455 | + | |
| 456 | + // Calculate trend over last 7 days | |
| 457 | + let recent_records: Vec<_> = history.iter().rev().take(168).collect(); // Last 7 days (hourly) | |
| 458 | + | |
| 459 | + if recent_records.len() < 24 { | |
| 460 | + return Ok(1.0); | |
| 461 | + } | |
| 462 | + | |
| 463 | + let recent_performance = recent_records.iter().take(24) | |
| 464 | + .map(|r| r.success_rate * (r.transfer_speed_mbps / 50.0).min(2.0)) | |
| 465 | + .sum::<f64>() / 24.0; | |
| 466 | + | |
| 467 | + let older_performance = recent_records.iter().skip(24).take(24) | |
| 468 | + .map(|r| r.success_rate * (r.transfer_speed_mbps / 50.0).min(2.0)) | |
| 469 | + .sum::<f64>() / 24.0; | |
| 470 | + | |
| 471 | + if older_performance > 0.0 { | |
| 472 | + Ok((recent_performance / older_performance).max(0.5).min(2.0)) // Cap trend between 0.5x and 2x | |
| 473 | + } else { | |
| 474 | + Ok(1.0) | |
| 475 | + } | |
| 476 | + } | |
| 477 | + | |
| 478 | + /// Update daily earnings summary | |
| 479 | + pub fn update_daily_earnings(&mut self, volunteer_id: &str) -> Result<()> { | |
| 480 | + let metrics = self.volunteer_metrics.get(volunteer_id) | |
| 481 | + .ok_or_else(|| anyhow::anyhow!("Volunteer metrics not found"))?; | |
| 482 | + | |
| 483 | + let today = Utc::now().date_naive(); | |
| 484 | + let key = format!("{}_{}", volunteer_id, today.format("%Y-%m-%d")); | |
| 485 | + | |
| 486 | + let base_earnings = self.calculate_base_storage_earnings(metrics); | |
| 487 | + let uptime_bonus = self.calculate_uptime_bonus(metrics); | |
| 488 | + let performance_bonus = self.calculate_performance_bonus(metrics); | |
| 489 | + let geographic_bonus = self.calculate_geographic_bonus(metrics); | |
| 490 | + let longevity_bonus = self.calculate_longevity_bonus(metrics); | |
| 491 | + let network_bonus = self.calculate_network_health_bonus(base_earnings); | |
| 492 | + | |
| 493 | + let daily_earnings = DailyEarnings { | |
| 494 | + date: Utc::now(), | |
| 495 | + base_earnings, | |
| 496 | + uptime_bonus, | |
| 497 | + performance_bonus, | |
| 498 | + geographic_bonus, | |
| 499 | + longevity_bonus, | |
| 500 | + network_bonus, | |
| 501 | + total_earnings: base_earnings + uptime_bonus + performance_bonus | |
| 502 | + + geographic_bonus + longevity_bonus + network_bonus, | |
| 503 | + storage_gb_hours: metrics.used_storage_gb * 24, // GB-hours for the day | |
| 504 | + actual_uptime_hours: metrics.uptime_hours_24h, | |
| 505 | + }; | |
| 506 | + | |
| 507 | + self.daily_earnings.insert(key, daily_earnings); | |
| 508 | + | |
| 509 | + Ok(()) | |
| 510 | + } | |
| 511 | + | |
| 512 | + /// Get earnings breakdown for display | |
| 513 | + pub fn get_earnings_breakdown(&self, volunteer_id: &str) -> Result<HashMap<String, u64>> { | |
| 514 | + let metrics = self.volunteer_metrics.get(volunteer_id) | |
| 515 | + .ok_or_else(|| anyhow::anyhow!("Volunteer metrics not found"))?; | |
| 516 | + | |
| 517 | + let mut breakdown = HashMap::new(); | |
| 518 | + breakdown.insert("base_storage".to_string(), self.calculate_base_storage_earnings(metrics)); | |
| 519 | + breakdown.insert("uptime_bonus".to_string(), self.calculate_uptime_bonus(metrics)); | |
| 520 | + breakdown.insert("performance_bonus".to_string(), self.calculate_performance_bonus(metrics)); | |
| 521 | + breakdown.insert("geographic_bonus".to_string(), self.calculate_geographic_bonus(metrics)); | |
| 522 | + breakdown.insert("longevity_bonus".to_string(), self.calculate_longevity_bonus(metrics)); | |
| 523 | + breakdown.insert("network_bonus".to_string(), self.calculate_network_health_bonus(self.calculate_base_storage_earnings(metrics))); | |
| 524 | + | |
| 525 | + Ok(breakdown) | |
| 526 | + } | |
| 527 | + | |
| 528 | + /// Update network metrics | |
| 529 | + pub fn update_network_metrics(&mut self, metrics: NetworkHealthMetrics) { | |
| 530 | + self.network_metrics = metrics; | |
| 531 | + } | |
| 532 | +} | |
| 533 | + | |
| 534 | +#[cfg(test)] | |
| 535 | +mod tests { | |
| 536 | + use super::*; | |
| 537 | + | |
| 538 | + #[test] | |
| 539 | + fn test_earnings_calculation() { | |
| 540 | + let mut calculator = EarningsCalculator::new(); | |
| 541 | + | |
| 542 | + let metrics = VolunteerMetrics { | |
| 543 | + volunteer_id: "test_volunteer".to_string(), | |
| 544 | + total_storage_gb: 100, | |
| 545 | + available_storage_gb: 50, | |
| 546 | + used_storage_gb: 50, | |
| 547 | + uptime_hours_24h: 24.0, | |
| 548 | + uptime_percentage: 100.0, | |
| 549 | + response_time_ms: 50, | |
| 550 | + transfer_speed_mbps: 100.0, | |
| 551 | + successful_transfers: 100, | |
| 552 | + failed_transfers: 0, | |
| 553 | + geographic_region: GeographicRegion::Europe, | |
| 554 | + connection_quality: ConnectionQuality::Excellent, | |
| 555 | + reliability_score: 1.0, | |
| 556 | + last_seen: Utc::now(), | |
| 557 | + joined_at: Utc::now() - Duration::days(365), | |
| 558 | + }; | |
| 559 | + | |
| 560 | + calculator.update_volunteer_metrics(metrics); | |
| 561 | + | |
| 562 | + let earnings = calculator.calculate_real_time_earnings("test_volunteer").unwrap(); | |
| 563 | + assert!(earnings > 0); | |
| 564 | + | |
| 565 | + let breakdown = calculator.get_earnings_breakdown("test_volunteer").unwrap(); | |
| 566 | + assert!(breakdown.contains_key("base_storage")); | |
| 567 | + assert!(breakdown.contains_key("uptime_bonus")); | |
| 568 | + } | |
| 569 | + | |
| 570 | + #[test] | |
| 571 | + fn test_performance_multipliers() { | |
| 572 | + let calculator = EarningsCalculator::new(); | |
| 573 | + | |
| 574 | + let high_performance_metrics = VolunteerMetrics { | |
| 575 | + volunteer_id: "high_perf".to_string(), | |
| 576 | + total_storage_gb: 100, | |
| 577 | + available_storage_gb: 0, | |
| 578 | + used_storage_gb: 100, | |
| 579 | + uptime_hours_24h: 24.0, | |
| 580 | + uptime_percentage: 99.9, | |
| 581 | + response_time_ms: 25, | |
| 582 | + transfer_speed_mbps: 150.0, | |
| 583 | + successful_transfers: 1000, | |
| 584 | + failed_transfers: 1, | |
| 585 | + geographic_region: GeographicRegion::Rare, | |
| 586 | + connection_quality: ConnectionQuality::Excellent, | |
| 587 | + reliability_score: 1.0, | |
| 588 | + last_seen: Utc::now(), | |
| 589 | + joined_at: Utc::now() - Duration::days(730), | |
| 590 | + }; | |
| 591 | + | |
| 592 | + let high_perf_bonus = calculator.calculate_performance_bonus(&high_performance_metrics); | |
| 593 | + | |
| 594 | + let low_performance_metrics = VolunteerMetrics { | |
| 595 | + volunteer_id: "low_perf".to_string(), | |
| 596 | + total_storage_gb: 100, | |
| 597 | + available_storage_gb: 80, | |
| 598 | + used_storage_gb: 20, | |
| 599 | + uptime_hours_24h: 20.0, | |
| 600 | + uptime_percentage: 83.3, | |
| 601 | + response_time_ms: 200, | |
| 602 | + transfer_speed_mbps: 5.0, | |
| 603 | + successful_transfers: 80, | |
| 604 | + failed_transfers: 20, | |
| 605 | + geographic_region: GeographicRegion::NorthAmerica, | |
| 606 | + connection_quality: ConnectionQuality::Fair, | |
| 607 | + reliability_score: 0.8, | |
| 608 | + last_seen: Utc::now(), | |
| 609 | + joined_at: Utc::now() - Duration::days(30), | |
| 610 | + }; | |
| 611 | + | |
| 612 | + let low_perf_bonus = calculator.calculate_performance_bonus(&low_performance_metrics); | |
| 613 | + | |
| 614 | + assert!(high_perf_bonus > low_perf_bonus); | |
| 615 | + } | |
| 616 | +} | |
src/economics/market_maker.rsadded@@ -0,0 +1,629 @@ | ||
| 1 | +//! Automated Market Maker for ZephyrCoin Price Stability | |
| 2 | +//! | |
| 3 | +//! Maintains stable token value through algorithmic trading and liquidity provision | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::{HashMap, VecDeque}; | |
| 8 | +use chrono::{DateTime, Utc, Duration}; | |
| 9 | + | |
| 10 | +/// Automated Market Maker for ZephyrCoin | |
| 11 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 12 | +pub struct ZephyrCoinAMM { | |
| 13 | + /// Liquidity pools for different pairs | |
| 14 | + pub pools: HashMap<TradingPair, LiquidityPool>, | |
| 15 | + /// Target price in USD (stable value target) | |
| 16 | + pub target_price_usd: f64, | |
| 17 | + /// Price stability configuration | |
| 18 | + pub stability_config: StabilityConfig, | |
| 19 | + /// Trading history for price analysis | |
| 20 | + pub price_history: VecDeque<PriceSnapshot>, | |
| 21 | + /// Current reserves | |
| 22 | + pub reserves: Reserves, | |
| 23 | + /// Fee structure | |
| 24 | + pub fees: FeeStructure, | |
| 25 | +} | |
| 26 | + | |
| 27 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 28 | +pub enum TradingPair { | |
| 29 | + ZEPH_USD, | |
| 30 | + ZEPH_ETH, | |
| 31 | + ZEPH_BTC, | |
| 32 | + ZEPH_USDC, | |
| 33 | +} | |
| 34 | + | |
| 35 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 36 | +pub struct LiquidityPool { | |
| 37 | + /// Pool reserves | |
| 38 | + pub reserve_a: u64, // ZEPH tokens | |
| 39 | + pub reserve_b: u64, // Other asset (in smallest units) | |
| 40 | + /// Total liquidity provider shares | |
| 41 | + pub total_shares: u64, | |
| 42 | + /// Liquidity provider positions | |
| 43 | + pub lp_positions: HashMap<String, LPPosition>, | |
| 44 | + /// Pool fee rate (e.g., 0.003 for 0.3%) | |
| 45 | + pub fee_rate: f64, | |
| 46 | + /// Last price update | |
| 47 | + pub last_update: DateTime<Utc>, | |
| 48 | + /// Price impact protection | |
| 49 | + pub max_slippage: f64, | |
| 50 | +} | |
| 51 | + | |
| 52 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 53 | +pub struct LPPosition { | |
| 54 | + pub shares: u64, | |
| 55 | + pub provided_at: DateTime<Utc>, | |
| 56 | + pub initial_zeph: u64, | |
| 57 | + pub initial_other: u64, | |
| 58 | +} | |
| 59 | + | |
| 60 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 61 | +pub struct StabilityConfig { | |
| 62 | + /// Price deviation threshold for intervention (5%) | |
| 63 | + pub intervention_threshold: f64, | |
| 64 | + /// Maximum daily price change (10%) | |
| 65 | + pub max_daily_change: f64, | |
| 66 | + /// Rebalancing frequency (hours) | |
| 67 | + pub rebalance_frequency: u32, | |
| 68 | + /// Emergency circuit breaker threshold (20%) | |
| 69 | + pub circuit_breaker_threshold: f64, | |
| 70 | + /// Minimum liquidity ratio | |
| 71 | + pub min_liquidity_ratio: f64, | |
| 72 | +} | |
| 73 | + | |
| 74 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 75 | +pub struct Reserves { | |
| 76 | + /// ZEPH token reserves for market making | |
| 77 | + pub zeph_reserve: u64, | |
| 78 | + /// USD equivalent reserves | |
| 79 | + pub usd_reserve: u64, | |
| 80 | + /// Emergency reserves | |
| 81 | + pub emergency_reserve: u64, | |
| 82 | + /// Insurance fund | |
| 83 | + pub insurance_fund: u64, | |
| 84 | +} | |
| 85 | + | |
| 86 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 87 | +pub struct FeeStructure { | |
| 88 | + /// Trading fee (0.3%) | |
| 89 | + pub trading_fee: f64, | |
| 90 | + /// Stability fee for interventions (0.1%) | |
| 91 | + pub stability_fee: f64, | |
| 92 | + /// LP reward rate (daily APY) | |
| 93 | + pub lp_reward_rate: f64, | |
| 94 | + /// Protocol fee (goes to treasury) | |
| 95 | + pub protocol_fee: f64, | |
| 96 | +} | |
| 97 | + | |
| 98 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 99 | +pub struct PriceSnapshot { | |
| 100 | + pub timestamp: DateTime<Utc>, | |
| 101 | + pub price_usd: f64, | |
| 102 | + pub volume_24h: u64, | |
| 103 | + pub liquidity_depth: u64, | |
| 104 | + pub volatility: f64, | |
| 105 | +} | |
| 106 | + | |
| 107 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 108 | +pub struct TradeExecution { | |
| 109 | + pub pair: TradingPair, | |
| 110 | + pub amount_in: u64, | |
| 111 | + pub amount_out: u64, | |
| 112 | + pub price: f64, | |
| 113 | + pub fee: u64, | |
| 114 | + pub slippage: f64, | |
| 115 | + pub timestamp: DateTime<Utc>, | |
| 116 | +} | |
| 117 | + | |
| 118 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 119 | +pub enum MarketOperation { | |
| 120 | + Buy { amount_usd: u64 }, | |
| 121 | + Sell { amount_zeph: u64 }, | |
| 122 | + AddLiquidity { zeph_amount: u64, usd_amount: u64 }, | |
| 123 | + RemoveLiquidity { shares: u64 }, | |
| 124 | + Rebalance, | |
| 125 | + EmergencyHalt, | |
| 126 | +} | |
| 127 | + | |
| 128 | +impl Default for StabilityConfig { | |
| 129 | + fn default() -> Self { | |
| 130 | + Self { | |
| 131 | + intervention_threshold: 0.05, // 5% | |
| 132 | + max_daily_change: 0.10, // 10% | |
| 133 | + rebalance_frequency: 4, // Every 4 hours | |
| 134 | + circuit_breaker_threshold: 0.20, // 20% | |
| 135 | + min_liquidity_ratio: 0.20, // 20% min liquidity | |
| 136 | + } | |
| 137 | + } | |
| 138 | +} | |
| 139 | + | |
| 140 | +impl Default for FeeStructure { | |
| 141 | + fn default() -> Self { | |
| 142 | + Self { | |
| 143 | + trading_fee: 0.003, // 0.3% | |
| 144 | + stability_fee: 0.001, // 0.1% | |
| 145 | + lp_reward_rate: 0.05, // 5% APY | |
| 146 | + protocol_fee: 0.0005, // 0.05% | |
| 147 | + } | |
| 148 | + } | |
| 149 | +} | |
| 150 | + | |
| 151 | +impl ZephyrCoinAMM { | |
| 152 | + /// Create new AMM with initial liquidity | |
| 153 | + pub fn new( | |
| 154 | + target_price_usd: f64, | |
| 155 | + initial_zeph: u64, | |
| 156 | + initial_usd: u64, | |
| 157 | + ) -> Self { | |
| 158 | + let mut pools = HashMap::new(); | |
| 159 | + | |
| 160 | + // Initialize ZEPH/USD pool | |
| 161 | + pools.insert(TradingPair::ZEPH_USD, LiquidityPool { | |
| 162 | + reserve_a: initial_zeph, | |
| 163 | + reserve_b: initial_usd, | |
| 164 | + total_shares: (initial_zeph * initial_usd).integer_sqrt(), | |
| 165 | + lp_positions: HashMap::new(), | |
| 166 | + fee_rate: 0.003, | |
| 167 | + last_update: Utc::now(), | |
| 168 | + max_slippage: 0.05, // 5% max slippage | |
| 169 | + }); | |
| 170 | + | |
| 171 | + Self { | |
| 172 | + pools, | |
| 173 | + target_price_usd, | |
| 174 | + stability_config: StabilityConfig::default(), | |
| 175 | + price_history: VecDeque::with_capacity(1440), // 24 hours of minute data | |
| 176 | + reserves: Reserves { | |
| 177 | + zeph_reserve: initial_zeph / 2, // Keep 50% as reserves | |
| 178 | + usd_reserve: initial_usd / 2, | |
| 179 | + emergency_reserve: initial_zeph / 10, // 10% emergency | |
| 180 | + insurance_fund: initial_usd / 20, // 5% insurance | |
| 181 | + }, | |
| 182 | + fees: FeeStructure::default(), | |
| 183 | + } | |
| 184 | + } | |
| 185 | + | |
| 186 | + /// Get current price for a trading pair | |
| 187 | + pub fn get_current_price(&self, pair: &TradingPair) -> Result<f64> { | |
| 188 | + let pool = self.pools.get(pair) | |
| 189 | + .ok_or_else(|| anyhow::anyhow!("Trading pair not found"))?; | |
| 190 | + | |
| 191 | + match pair { | |
| 192 | + TradingPair::ZEPH_USD => { | |
| 193 | + if pool.reserve_a == 0 { | |
| 194 | + return Err(anyhow::anyhow!("No ZEPH liquidity")); | |
| 195 | + } | |
| 196 | + Ok(pool.reserve_b as f64 / pool.reserve_a as f64) | |
| 197 | + }, | |
| 198 | + _ => Err(anyhow::anyhow!("Price calculation not implemented for this pair")), | |
| 199 | + } | |
| 200 | + } | |
| 201 | + | |
| 202 | + /// Calculate price impact for a trade | |
| 203 | + pub fn calculate_price_impact(&self, pair: &TradingPair, amount_in: u64, buy: bool) -> Result<f64> { | |
| 204 | + let pool = self.pools.get(pair) | |
| 205 | + .ok_or_else(|| anyhow::anyhow!("Trading pair not found"))?; | |
| 206 | + | |
| 207 | + let (reserve_in, reserve_out) = if buy { | |
| 208 | + (pool.reserve_b, pool.reserve_a) // Buying ZEPH with USD | |
| 209 | + } else { | |
| 210 | + (pool.reserve_a, pool.reserve_b) // Selling ZEPH for USD | |
| 211 | + }; | |
| 212 | + | |
| 213 | + // Constant product formula: x * y = k | |
| 214 | + let k = reserve_in * reserve_out; | |
| 215 | + let new_reserve_in = reserve_in + amount_in; | |
| 216 | + let new_reserve_out = k / new_reserve_in; | |
| 217 | + let amount_out = reserve_out - new_reserve_out; | |
| 218 | + | |
| 219 | + // Calculate price impact | |
| 220 | + let current_price = reserve_out as f64 / reserve_in as f64; | |
| 221 | + let execution_price = amount_out as f64 / amount_in as f64; | |
| 222 | + let price_impact = ((execution_price - current_price) / current_price).abs(); | |
| 223 | + | |
| 224 | + Ok(price_impact) | |
| 225 | + } | |
| 226 | + | |
| 227 | + /// Execute a swap with slippage protection | |
| 228 | + pub fn execute_swap( | |
| 229 | + &mut self, | |
| 230 | + pair: TradingPair, | |
| 231 | + amount_in: u64, | |
| 232 | + min_amount_out: u64, | |
| 233 | + buy: bool, | |
| 234 | + ) -> Result<TradeExecution> { | |
| 235 | + let pool = self.pools.get_mut(&pair) | |
| 236 | + .ok_or_else(|| anyhow::anyhow!("Trading pair not found"))?; | |
| 237 | + | |
| 238 | + // Check price impact | |
| 239 | + let price_impact = self.calculate_price_impact(&pair, amount_in, buy)?; | |
| 240 | + if price_impact > pool.max_slippage { | |
| 241 | + return Err(anyhow::anyhow!("Price impact too high: {:.2}%", price_impact * 100.0)); | |
| 242 | + } | |
| 243 | + | |
| 244 | + let (reserve_in, reserve_out) = if buy { | |
| 245 | + (&mut pool.reserve_b, &mut pool.reserve_a) | |
| 246 | + } else { | |
| 247 | + (&mut pool.reserve_a, &mut pool.reserve_b) | |
| 248 | + }; | |
| 249 | + | |
| 250 | + // Calculate output amount with fee | |
| 251 | + let amount_in_with_fee = (amount_in as f64 * (1.0 - pool.fee_rate)) as u64; | |
| 252 | + let k = *reserve_in * *reserve_out; | |
| 253 | + let new_reserve_in = *reserve_in + amount_in_with_fee; | |
| 254 | + let new_reserve_out = k / new_reserve_in; | |
| 255 | + let amount_out = *reserve_out - new_reserve_out; | |
| 256 | + | |
| 257 | + if amount_out < min_amount_out { | |
| 258 | + return Err(anyhow::anyhow!("Slippage tolerance exceeded")); | |
| 259 | + } | |
| 260 | + | |
| 261 | + // Update reserves | |
| 262 | + *reserve_in += amount_in; | |
| 263 | + *reserve_out = new_reserve_out; | |
| 264 | + | |
| 265 | + let execution_price = amount_out as f64 / amount_in as f64; | |
| 266 | + let fee = amount_in - amount_in_with_fee; | |
| 267 | + | |
| 268 | + pool.last_update = Utc::now(); | |
| 269 | + | |
| 270 | + Ok(TradeExecution { | |
| 271 | + pair, | |
| 272 | + amount_in, | |
| 273 | + amount_out, | |
| 274 | + price: execution_price, | |
| 275 | + fee, | |
| 276 | + slippage: price_impact, | |
| 277 | + timestamp: Utc::now(), | |
| 278 | + }) | |
| 279 | + } | |
| 280 | + | |
| 281 | + /// Add liquidity to a pool | |
| 282 | + pub fn add_liquidity( | |
| 283 | + &mut self, | |
| 284 | + pair: TradingPair, | |
| 285 | + user: String, | |
| 286 | + amount_a: u64, | |
| 287 | + amount_b: u64, | |
| 288 | + ) -> Result<u64> { | |
| 289 | + let pool = self.pools.get_mut(&pair) | |
| 290 | + .ok_or_else(|| anyhow::anyhow!("Trading pair not found"))?; | |
| 291 | + | |
| 292 | + // Calculate optimal amounts based on current ratio | |
| 293 | + let ratio = pool.reserve_b as f64 / pool.reserve_a as f64; | |
| 294 | + let optimal_b = (amount_a as f64 * ratio) as u64; | |
| 295 | + | |
| 296 | + let (final_a, final_b) = if optimal_b <= amount_b { | |
| 297 | + (amount_a, optimal_b) | |
| 298 | + } else { | |
| 299 | + let optimal_a = (amount_b as f64 / ratio) as u64; | |
| 300 | + (optimal_a, amount_b) | |
| 301 | + }; | |
| 302 | + | |
| 303 | + // Calculate LP shares | |
| 304 | + let liquidity = if pool.total_shares == 0 { | |
| 305 | + (final_a * final_b).integer_sqrt() | |
| 306 | + } else { | |
| 307 | + std::cmp::min( | |
| 308 | + final_a * pool.total_shares / pool.reserve_a, | |
| 309 | + final_b * pool.total_shares / pool.reserve_b, | |
| 310 | + ) | |
| 311 | + }; | |
| 312 | + | |
| 313 | + // Update pool | |
| 314 | + pool.reserve_a += final_a; | |
| 315 | + pool.reserve_b += final_b; | |
| 316 | + pool.total_shares += liquidity; | |
| 317 | + | |
| 318 | + // Record LP position | |
| 319 | + pool.lp_positions.insert(user, LPPosition { | |
| 320 | + shares: liquidity, | |
| 321 | + provided_at: Utc::now(), | |
| 322 | + initial_zeph: final_a, | |
| 323 | + initial_other: final_b, | |
| 324 | + }); | |
| 325 | + | |
| 326 | + pool.last_update = Utc::now(); | |
| 327 | + | |
| 328 | + Ok(liquidity) | |
| 329 | + } | |
| 330 | + | |
| 331 | + /// Remove liquidity from a pool | |
| 332 | + pub fn remove_liquidity( | |
| 333 | + &mut self, | |
| 334 | + pair: TradingPair, | |
| 335 | + user: String, | |
| 336 | + shares: u64, | |
| 337 | + ) -> Result<(u64, u64)> { | |
| 338 | + let pool = self.pools.get_mut(&pair) | |
| 339 | + .ok_or_else(|| anyhow::anyhow!("Trading pair not found"))?; | |
| 340 | + | |
| 341 | + let position = pool.lp_positions.get_mut(&user) | |
| 342 | + .ok_or_else(|| anyhow::anyhow!("No liquidity position found"))?; | |
| 343 | + | |
| 344 | + if position.shares < shares { | |
| 345 | + return Err(anyhow::anyhow!("Insufficient LP shares")); | |
| 346 | + } | |
| 347 | + | |
| 348 | + // Calculate withdrawal amounts | |
| 349 | + let amount_a = shares * pool.reserve_a / pool.total_shares; | |
| 350 | + let amount_b = shares * pool.reserve_b / pool.total_shares; | |
| 351 | + | |
| 352 | + // Update pool | |
| 353 | + pool.reserve_a -= amount_a; | |
| 354 | + pool.reserve_b -= amount_b; | |
| 355 | + pool.total_shares -= shares; | |
| 356 | + | |
| 357 | + // Update position | |
| 358 | + position.shares -= shares; | |
| 359 | + if position.shares == 0 { | |
| 360 | + pool.lp_positions.remove(&user); | |
| 361 | + } | |
| 362 | + | |
| 363 | + pool.last_update = Utc::now(); | |
| 364 | + | |
| 365 | + Ok((amount_a, amount_b)) | |
| 366 | + } | |
| 367 | + | |
| 368 | + /// Perform price stability intervention | |
| 369 | + pub async fn perform_stability_intervention(&mut self) -> Result<Vec<MarketOperation>> { | |
| 370 | + let current_price = self.get_current_price(&TradingPair::ZEPH_USD)?; | |
| 371 | + let price_deviation = (current_price - self.target_price_usd) / self.target_price_usd; | |
| 372 | + | |
| 373 | + let mut operations = Vec::new(); | |
| 374 | + | |
| 375 | + // Check if intervention is needed | |
| 376 | + if price_deviation.abs() > self.stability_config.intervention_threshold { | |
| 377 | + tracing::info!("Price deviation detected: {:.2}%, target: ${:.4}, current: ${:.4}", | |
| 378 | + price_deviation * 100.0, self.target_price_usd, current_price); | |
| 379 | + | |
| 380 | + if price_deviation > 0.0 { | |
| 381 | + // Price too high - sell ZEPH to decrease price | |
| 382 | + let sell_amount = self.calculate_intervention_amount(price_deviation, false)?; | |
| 383 | + operations.push(MarketOperation::Sell { amount_zeph: sell_amount }); | |
| 384 | + } else { | |
| 385 | + // Price too low - buy ZEPH to increase price | |
| 386 | + let buy_amount_usd = self.calculate_intervention_amount(price_deviation.abs(), true)?; | |
| 387 | + operations.push(MarketOperation::Buy { amount_usd: buy_amount_usd }); | |
| 388 | + } | |
| 389 | + } | |
| 390 | + | |
| 391 | + // Execute emergency halt if needed | |
| 392 | + if price_deviation.abs() > self.stability_config.circuit_breaker_threshold { | |
| 393 | + tracing::warn!("Emergency circuit breaker triggered at {:.2}% deviation", price_deviation * 100.0); | |
| 394 | + operations.push(MarketOperation::EmergencyHalt); | |
| 395 | + } | |
| 396 | + | |
| 397 | + Ok(operations) | |
| 398 | + } | |
| 399 | + | |
| 400 | + /// Calculate intervention amount based on price deviation | |
| 401 | + fn calculate_intervention_amount(&self, deviation: f64, is_buy: bool) -> Result<u64> { | |
| 402 | + let pool = self.pools.get(&TradingPair::ZEPH_USD) | |
| 403 | + .ok_or_else(|| anyhow::anyhow!("ZEPH/USD pool not found"))?; | |
| 404 | + | |
| 405 | + // Use a fraction of reserves based on deviation severity | |
| 406 | + let intervention_factor = (deviation / self.stability_config.intervention_threshold).min(1.0); | |
| 407 | + | |
| 408 | + if is_buy { | |
| 409 | + // Buy ZEPH with USD reserves | |
| 410 | + let max_usd = self.reserves.usd_reserve / 10; // Max 10% of reserves per intervention | |
| 411 | + Ok((max_usd as f64 * intervention_factor) as u64) | |
| 412 | + } else { | |
| 413 | + // Sell ZEPH from reserves | |
| 414 | + let max_zeph = self.reserves.zeph_reserve / 10; // Max 10% of reserves per intervention | |
| 415 | + Ok((max_zeph as f64 * intervention_factor) as u64) | |
| 416 | + } | |
| 417 | + } | |
| 418 | + | |
| 419 | + /// Update price history | |
| 420 | + pub fn update_price_history(&mut self, price: f64, volume: u64) { | |
| 421 | + let snapshot = PriceSnapshot { | |
| 422 | + timestamp: Utc::now(), | |
| 423 | + price_usd: price, | |
| 424 | + volume_24h: volume, | |
| 425 | + liquidity_depth: self.calculate_liquidity_depth(), | |
| 426 | + volatility: self.calculate_volatility(), | |
| 427 | + }; | |
| 428 | + | |
| 429 | + self.price_history.push_back(snapshot); | |
| 430 | + | |
| 431 | + // Keep only last 24 hours | |
| 432 | + while self.price_history.len() > 1440 { | |
| 433 | + self.price_history.pop_front(); | |
| 434 | + } | |
| 435 | + } | |
| 436 | + | |
| 437 | + /// Calculate current liquidity depth | |
| 438 | + fn calculate_liquidity_depth(&self) -> u64 { | |
| 439 | + self.pools.get(&TradingPair::ZEPH_USD) | |
| 440 | + .map(|pool| pool.reserve_a + pool.reserve_b) | |
| 441 | + .unwrap_or(0) | |
| 442 | + } | |
| 443 | + | |
| 444 | + /// Calculate price volatility (24h) | |
| 445 | + fn calculate_volatility(&self) -> f64 { | |
| 446 | + if self.price_history.len() < 2 { | |
| 447 | + return 0.0; | |
| 448 | + } | |
| 449 | + | |
| 450 | + let prices: Vec<f64> = self.price_history.iter().map(|s| s.price_usd).collect(); | |
| 451 | + let mean = prices.iter().sum::<f64>() / prices.len() as f64; | |
| 452 | + let variance = prices.iter() | |
| 453 | + .map(|price| (price - mean).powi(2)) | |
| 454 | + .sum::<f64>() / prices.len() as f64; | |
| 455 | + | |
| 456 | + variance.sqrt() / mean // Coefficient of variation | |
| 457 | + } | |
| 458 | + | |
| 459 | + /// Run automated market making loop | |
| 460 | + pub async fn run_automated_trading(&mut self, interval_minutes: u64) -> Result<()> { | |
| 461 | + let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(interval_minutes * 60)); | |
| 462 | + | |
| 463 | + loop { | |
| 464 | + interval.tick().await; | |
| 465 | + | |
| 466 | + // Update current price | |
| 467 | + if let Ok(price) = self.get_current_price(&TradingPair::ZEPH_USD) { | |
| 468 | + self.update_price_history(price, 0); // Volume would be tracked separately | |
| 469 | + | |
| 470 | + // Perform stability intervention if needed | |
| 471 | + let operations = self.perform_stability_intervention().await?; | |
| 472 | + | |
| 473 | + for operation in operations { | |
| 474 | + match operation { | |
| 475 | + MarketOperation::Buy { amount_usd } => { | |
| 476 | + self.execute_stability_buy(amount_usd).await?; | |
| 477 | + }, | |
| 478 | + MarketOperation::Sell { amount_zeph } => { | |
| 479 | + self.execute_stability_sell(amount_zeph).await?; | |
| 480 | + }, | |
| 481 | + MarketOperation::EmergencyHalt => { | |
| 482 | + self.emergency_halt().await?; | |
| 483 | + return Ok(()); // Stop trading | |
| 484 | + }, | |
| 485 | + _ => {}, // Handle other operations as needed | |
| 486 | + } | |
| 487 | + } | |
| 488 | + | |
| 489 | + // Rebalance if needed | |
| 490 | + if self.should_rebalance().await? { | |
| 491 | + self.rebalance_pools().await?; | |
| 492 | + } | |
| 493 | + } | |
| 494 | + | |
| 495 | + tracing::debug!("AMM cycle complete"); | |
| 496 | + } | |
| 497 | + } | |
| 498 | + | |
| 499 | + /// Execute stability buy operation | |
| 500 | + async fn execute_stability_buy(&mut self, amount_usd: u64) -> Result<()> { | |
| 501 | + if amount_usd > self.reserves.usd_reserve { | |
| 502 | + return Err(anyhow::anyhow!("Insufficient USD reserves for stability buy")); | |
| 503 | + } | |
| 504 | + | |
| 505 | + let trade = self.execute_swap( | |
| 506 | + TradingPair::ZEPH_USD, | |
| 507 | + amount_usd, | |
| 508 | + 0, // No minimum for stability operations | |
| 509 | + true, | |
| 510 | + )?; | |
| 511 | + | |
| 512 | + self.reserves.usd_reserve -= amount_usd; | |
| 513 | + self.reserves.zeph_reserve += trade.amount_out; | |
| 514 | + | |
| 515 | + tracing::info!("Executed stability buy: {} USD -> {} ZEPH at ${:.4}", | |
| 516 | + amount_usd, trade.amount_out, trade.price); | |
| 517 | + | |
| 518 | + Ok(()) | |
| 519 | + } | |
| 520 | + | |
| 521 | + /// Execute stability sell operation | |
| 522 | + async fn execute_stability_sell(&mut self, amount_zeph: u64) -> Result<()> { | |
| 523 | + if amount_zeph > self.reserves.zeph_reserve { | |
| 524 | + return Err(anyhow::anyhow!("Insufficient ZEPH reserves for stability sell")); | |
| 525 | + } | |
| 526 | + | |
| 527 | + let trade = self.execute_swap( | |
| 528 | + TradingPair::ZEPH_USD, | |
| 529 | + amount_zeph, | |
| 530 | + 0, // No minimum for stability operations | |
| 531 | + false, | |
| 532 | + )?; | |
| 533 | + | |
| 534 | + self.reserves.zeph_reserve -= amount_zeph; | |
| 535 | + self.reserves.usd_reserve += trade.amount_out; | |
| 536 | + | |
| 537 | + tracing::info!("Executed stability sell: {} ZEPH -> {} USD at ${:.4}", | |
| 538 | + amount_zeph, trade.amount_out, trade.price); | |
| 539 | + | |
| 540 | + Ok(()) | |
| 541 | + } | |
| 542 | + | |
| 543 | + /// Emergency halt trading | |
| 544 | + async fn emergency_halt(&mut self) -> Result<()> { | |
| 545 | + tracing::error!("Emergency halt activated - suspending all trading"); | |
| 546 | + // In real implementation, would pause all pools and notify operators | |
| 547 | + Ok(()) | |
| 548 | + } | |
| 549 | + | |
| 550 | + /// Check if rebalancing is needed | |
| 551 | + async fn should_rebalance(&self) -> Result<bool> { | |
| 552 | + let last_rebalance = self.pools.get(&TradingPair::ZEPH_USD) | |
| 553 | + .map(|pool| pool.last_update) | |
| 554 | + .unwrap_or(Utc::now()); | |
| 555 | + | |
| 556 | + let hours_since_rebalance = (Utc::now() - last_rebalance).num_hours(); | |
| 557 | + Ok(hours_since_rebalance >= self.stability_config.rebalance_frequency as i64) | |
| 558 | + } | |
| 559 | + | |
| 560 | + /// Rebalance pools to maintain optimal ratios | |
| 561 | + async fn rebalance_pools(&mut self) -> Result<()> { | |
| 562 | + tracing::info!("Performing AMM rebalancing"); | |
| 563 | + | |
| 564 | + // Rebalancing logic would optimize pool ratios | |
| 565 | + // For now, just update timestamp | |
| 566 | + if let Some(pool) = self.pools.get_mut(&TradingPair::ZEPH_USD) { | |
| 567 | + pool.last_update = Utc::now(); | |
| 568 | + } | |
| 569 | + | |
| 570 | + Ok(()) | |
| 571 | + } | |
| 572 | +} | |
| 573 | + | |
| 574 | +trait IntegerSqrt { | |
| 575 | + fn integer_sqrt(self) -> Self; | |
| 576 | +} | |
| 577 | + | |
| 578 | +impl IntegerSqrt for u64 { | |
| 579 | + fn integer_sqrt(self) -> Self { | |
| 580 | + if self < 2 { | |
| 581 | + return self; | |
| 582 | + } | |
| 583 | + | |
| 584 | + let mut x = self; | |
| 585 | + let mut y = (x + 1) / 2; | |
| 586 | + | |
| 587 | + while y < x { | |
| 588 | + x = y; | |
| 589 | + y = (x + self / x) / 2; | |
| 590 | + } | |
| 591 | + | |
| 592 | + x | |
| 593 | + } | |
| 594 | +} | |
| 595 | + | |
| 596 | +#[cfg(test)] | |
| 597 | +mod tests { | |
| 598 | + use super::*; | |
| 599 | + | |
| 600 | + #[test] | |
| 601 | + fn test_amm_creation() { | |
| 602 | + let amm = ZephyrCoinAMM::new(0.10, 1_000_000, 100_000); // $0.10 target price | |
| 603 | + assert_eq!(amm.target_price_usd, 0.10); | |
| 604 | + assert!(amm.pools.contains_key(&TradingPair::ZEPH_USD)); | |
| 605 | + } | |
| 606 | + | |
| 607 | + #[test] | |
| 608 | + fn test_price_calculation() { | |
| 609 | + let amm = ZephyrCoinAMM::new(0.10, 1_000_000, 100_000); | |
| 610 | + let price = amm.get_current_price(&TradingPair::ZEPH_USD).unwrap(); | |
| 611 | + assert_eq!(price, 0.10); // 100,000 / 1,000,000 | |
| 612 | + } | |
| 613 | + | |
| 614 | + #[tokio::test] | |
| 615 | + async fn test_swap_execution() { | |
| 616 | + let mut amm = ZephyrCoinAMM::new(0.10, 1_000_000, 100_000); | |
| 617 | + | |
| 618 | + // Buy 1000 ZEPH with USD | |
| 619 | + let trade = amm.execute_swap( | |
| 620 | + TradingPair::ZEPH_USD, | |
| 621 | + 100, // 100 USD | |
| 622 | + 950, // Minimum 950 ZEPH (allowing for slippage) | |
| 623 | + true, | |
| 624 | + ).unwrap(); | |
| 625 | + | |
| 626 | + assert!(trade.amount_out >= 950); | |
| 627 | + assert!(trade.fee > 0); | |
| 628 | + } | |
| 629 | +} | |
src/economics/mod.rsadded@@ -0,0 +1,21 @@ | ||
| 1 | +//! Economics Module | |
| 2 | +//! | |
| 3 | +//! Complete economic system for ZephyrFS including token economics, payments, and rewards | |
| 4 | + | |
| 5 | +pub mod token_model; | |
| 6 | +pub mod zephyr_coin; | |
| 7 | +pub mod network_health_minter; | |
| 8 | +pub mod market_maker; | |
| 9 | +pub mod earnings_calculator; | |
| 10 | +pub mod payment_processor; | |
| 11 | +pub mod payout_scheduler; | |
| 12 | +pub mod performance_rewards; | |
| 13 | + | |
| 14 | +pub use token_model::{TokenEconomicsManager, TokenEconomics, NetworkHealthMetrics, RewardReason}; | |
| 15 | +pub use zephyr_coin::{ZephyrCoin, TokenEvent}; | |
| 16 | +pub use network_health_minter::{NetworkHealthController, HealthBasedMinter}; | |
| 17 | +pub use market_maker::{ZephyrCoinAMM, TradingPair, Currency as AMMCurrency}; | |
| 18 | +pub use earnings_calculator::{EarningsCalculator, VolunteerMetrics, EarningsProjection}; | |
| 19 | +pub use payment_processor::{PaymentProcessor, PaymentRequest, Currency, PaymentMethod}; | |
| 20 | +pub use payout_scheduler::{PayoutScheduler, PayoutPreferences, PayoutFrequency}; | |
| 21 | +pub use performance_rewards::{PerformanceRewardsSystem, PerformanceScore, Achievement, RewardTier}; | |
src/economics/network_health_minter.rsadded@@ -0,0 +1,516 @@ | ||
| 1 | +//! Network Health-Based Token Minting and Burning | |
| 2 | +//! | |
| 3 | +//! Automated token supply management based on ZephyrFS network metrics | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::HashMap; | |
| 8 | +use chrono::{DateTime, Utc, Duration}; | |
| 9 | +use tokio::time::{sleep, Duration as TokioDuration}; | |
| 10 | + | |
| 11 | +use super::token_model::{TokenEconomicsManager, NetworkHealthMetrics}; | |
| 12 | +use super::zephyr_coin::{ZephyrCoin, TokenEvent}; | |
| 13 | + | |
| 14 | +/// Network health-based minting controller | |
| 15 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 16 | +pub struct HealthBasedMinter { | |
| 17 | + /// Target network health thresholds | |
| 18 | + pub health_thresholds: HealthThresholds, | |
| 19 | + /// Minting rates based on health | |
| 20 | + pub minting_rates: MintingRates, | |
| 21 | + /// Burning policies | |
| 22 | + pub burning_policies: BurningPolicies, | |
| 23 | + /// Last operation timestamps | |
| 24 | + pub last_operations: OperationTimestamps, | |
| 25 | + /// Network performance history | |
| 26 | + pub performance_history: Vec<PerformanceSnapshot>, | |
| 27 | +} | |
| 28 | + | |
| 29 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 30 | +pub struct HealthThresholds { | |
| 31 | + /// Minimum uptime for minting (95%) | |
| 32 | + pub min_uptime_percent: f64, | |
| 33 | + /// Minimum geographic diversity (50%) | |
| 34 | + pub min_geographic_diversity: f64, | |
| 35 | + /// Minimum data durability (99.9%) | |
| 36 | + pub min_data_durability: f64, | |
| 37 | + /// Target utilization range (70-85%) | |
| 38 | + pub target_utilization_min: f64, | |
| 39 | + pub target_utilization_max: f64, | |
| 40 | + /// Minimum active volunteers for rewards | |
| 41 | + pub min_active_volunteers: u32, | |
| 42 | +} | |
| 43 | + | |
| 44 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 45 | +pub struct MintingRates { | |
| 46 | + /// Base rate per healthy GB per day | |
| 47 | + pub base_rate_per_gb: u64, | |
| 48 | + /// Multipliers for health levels | |
| 49 | + pub excellent_multiplier: f64, // >98% health | |
| 50 | + pub good_multiplier: f64, // 90-98% health | |
| 51 | + pub fair_multiplier: f64, // 80-90% health | |
| 52 | + pub poor_multiplier: f64, // <80% health (reduced/no minting) | |
| 53 | + /// Bonus for rapid network growth | |
| 54 | + pub growth_bonus_multiplier: f64, | |
| 55 | + /// Emergency mint rate during network stress | |
| 56 | + pub emergency_rate_multiplier: f64, | |
| 57 | +} | |
| 58 | + | |
| 59 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 60 | +pub struct BurningPolicies { | |
| 61 | + /// Burn unused rewards after days | |
| 62 | + pub unused_reward_burn_days: u32, | |
| 63 | + /// Burn rate for idle tokens (percentage) | |
| 64 | + pub idle_token_burn_rate: f64, | |
| 65 | + /// Burn tokens during network congestion | |
| 66 | + pub congestion_burn_enabled: bool, | |
| 67 | + /// Maximum daily burn percentage | |
| 68 | + pub max_daily_burn_percent: f64, | |
| 69 | + /// Emergency burn during token oversupply | |
| 70 | + pub emergency_burn_threshold: f64, | |
| 71 | +} | |
| 72 | + | |
| 73 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 74 | +pub struct OperationTimestamps { | |
| 75 | + pub last_mint: DateTime<Utc>, | |
| 76 | + pub last_burn: DateTime<Utc>, | |
| 77 | + pub last_health_check: DateTime<Utc>, | |
| 78 | + pub last_emergency_action: DateTime<Utc>, | |
| 79 | +} | |
| 80 | + | |
| 81 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 82 | +pub struct PerformanceSnapshot { | |
| 83 | + pub timestamp: DateTime<Utc>, | |
| 84 | + pub metrics: NetworkHealthMetrics, | |
| 85 | + pub health_score: f64, | |
| 86 | + pub tokens_minted: u64, | |
| 87 | + pub tokens_burned: u64, | |
| 88 | + pub active_rewards: u64, | |
| 89 | +} | |
| 90 | + | |
| 91 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 92 | +pub enum HealthStatus { | |
| 93 | + Excellent, // >98% health score | |
| 94 | + Good, // 90-98% health score | |
| 95 | + Fair, // 80-90% health score | |
| 96 | + Poor, // <80% health score | |
| 97 | + Emergency, // Critical network issues | |
| 98 | +} | |
| 99 | + | |
| 100 | +impl Default for HealthThresholds { | |
| 101 | + fn default() -> Self { | |
| 102 | + Self { | |
| 103 | + min_uptime_percent: 95.0, | |
| 104 | + min_geographic_diversity: 50.0, | |
| 105 | + min_data_durability: 99.9, | |
| 106 | + target_utilization_min: 70.0, | |
| 107 | + target_utilization_max: 85.0, | |
| 108 | + min_active_volunteers: 10, | |
| 109 | + } | |
| 110 | + } | |
| 111 | +} | |
| 112 | + | |
| 113 | +impl Default for MintingRates { | |
| 114 | + fn default() -> Self { | |
| 115 | + Self { | |
| 116 | + base_rate_per_gb: 20_000_000_000_000_000, // 0.02 ZEPH per GB per day | |
| 117 | + excellent_multiplier: 1.5, | |
| 118 | + good_multiplier: 1.0, | |
| 119 | + fair_multiplier: 0.7, | |
| 120 | + poor_multiplier: 0.3, | |
| 121 | + growth_bonus_multiplier: 1.2, | |
| 122 | + emergency_rate_multiplier: 2.0, | |
| 123 | + } | |
| 124 | + } | |
| 125 | +} | |
| 126 | + | |
| 127 | +impl Default for BurningPolicies { | |
| 128 | + fn default() -> Self { | |
| 129 | + Self { | |
| 130 | + unused_reward_burn_days: 90, | |
| 131 | + idle_token_burn_rate: 0.01, // 1% quarterly | |
| 132 | + congestion_burn_enabled: true, | |
| 133 | + max_daily_burn_percent: 0.5, // 0.5% max daily burn | |
| 134 | + emergency_burn_threshold: 1.1, // 110% of target supply | |
| 135 | + } | |
| 136 | + } | |
| 137 | +} | |
| 138 | + | |
| 139 | +impl Default for HealthBasedMinter { | |
| 140 | + fn default() -> Self { | |
| 141 | + Self { | |
| 142 | + health_thresholds: HealthThresholds::default(), | |
| 143 | + minting_rates: MintingRates::default(), | |
| 144 | + burning_policies: BurningPolicies::default(), | |
| 145 | + last_operations: OperationTimestamps { | |
| 146 | + last_mint: Utc::now(), | |
| 147 | + last_burn: Utc::now(), | |
| 148 | + last_health_check: Utc::now(), | |
| 149 | + last_emergency_action: Utc::now(), | |
| 150 | + }, | |
| 151 | + performance_history: Vec::new(), | |
| 152 | + } | |
| 153 | + } | |
| 154 | +} | |
| 155 | + | |
| 156 | +/// Network health-based token operations | |
| 157 | +pub struct NetworkHealthController { | |
| 158 | + minter: HealthBasedMinter, | |
| 159 | + token_manager: TokenEconomicsManager, | |
| 160 | + zephyr_coin: ZephyrCoin, | |
| 161 | + events: Vec<TokenEvent>, | |
| 162 | +} | |
| 163 | + | |
| 164 | +impl NetworkHealthController { | |
| 165 | + /// Create new health-based controller | |
| 166 | + pub fn new( | |
| 167 | + minter: HealthBasedMinter, | |
| 168 | + token_manager: TokenEconomicsManager, | |
| 169 | + zephyr_coin: ZephyrCoin, | |
| 170 | + ) -> Self { | |
| 171 | + Self { | |
| 172 | + minter, | |
| 173 | + token_manager, | |
| 174 | + zephyr_coin, | |
| 175 | + events: Vec::new(), | |
| 176 | + } | |
| 177 | + } | |
| 178 | + | |
| 179 | + /// Calculate network health score | |
| 180 | + pub fn calculate_health_score(&self, metrics: &NetworkHealthMetrics) -> f64 { | |
| 181 | + let uptime_score = (metrics.average_uptime / 100.0).min(1.0); | |
| 182 | + let diversity_score = (metrics.geographic_diversity / 100.0).min(1.0); | |
| 183 | + let durability_score = (metrics.data_durability / 100.0).min(1.0); | |
| 184 | + let utilization_score = self.calculate_utilization_score(metrics.utilization_rate); | |
| 185 | + let volunteer_score = self.calculate_volunteer_score(metrics.active_volunteers); | |
| 186 | + | |
| 187 | + // Weighted health score | |
| 188 | + let weights = [0.25, 0.20, 0.25, 0.15, 0.15]; // uptime, diversity, durability, utilization, volunteers | |
| 189 | + let scores = [uptime_score, diversity_score, durability_score, utilization_score, volunteer_score]; | |
| 190 | + | |
| 191 | + scores.iter().zip(weights.iter()) | |
| 192 | + .map(|(score, weight)| score * weight) | |
| 193 | + .sum::<f64>() * 100.0 | |
| 194 | + } | |
| 195 | + | |
| 196 | + /// Calculate utilization score (optimal range: 70-85%) | |
| 197 | + fn calculate_utilization_score(&self, utilization: f64) -> f64 { | |
| 198 | + let min = self.minter.health_thresholds.target_utilization_min; | |
| 199 | + let max = self.minter.health_thresholds.target_utilization_max; | |
| 200 | + | |
| 201 | + if utilization >= min && utilization <= max { | |
| 202 | + 1.0 // Perfect utilization | |
| 203 | + } else if utilization < min { | |
| 204 | + utilization / min // Underutilized | |
| 205 | + } else { | |
| 206 | + // Overutilized - exponential penalty | |
| 207 | + (1.0 / (1.0 + (utilization - max) / 10.0)).max(0.1) | |
| 208 | + } | |
| 209 | + } | |
| 210 | + | |
| 211 | + /// Calculate volunteer participation score | |
| 212 | + fn calculate_volunteer_score(&self, volunteers: u32) -> f64 { | |
| 213 | + let min = self.minter.health_thresholds.min_active_volunteers; | |
| 214 | + if volunteers >= min { | |
| 215 | + (volunteers as f64 / (min as f64 * 2.0)).min(1.0) | |
| 216 | + } else { | |
| 217 | + volunteers as f64 / min as f64 | |
| 218 | + } | |
| 219 | + } | |
| 220 | + | |
| 221 | + /// Determine health status from score | |
| 222 | + pub fn determine_health_status(&self, health_score: f64) -> HealthStatus { | |
| 223 | + if health_score >= 98.0 { | |
| 224 | + HealthStatus::Excellent | |
| 225 | + } else if health_score >= 90.0 { | |
| 226 | + HealthStatus::Good | |
| 227 | + } else if health_score >= 80.0 { | |
| 228 | + HealthStatus::Fair | |
| 229 | + } else if health_score >= 50.0 { | |
| 230 | + HealthStatus::Poor | |
| 231 | + } else { | |
| 232 | + HealthStatus::Emergency | |
| 233 | + } | |
| 234 | + } | |
| 235 | + | |
| 236 | + /// Execute health-based minting | |
| 237 | + pub async fn execute_health_based_minting(&mut self, metrics: NetworkHealthMetrics) -> Result<u64> { | |
| 238 | + let health_score = self.calculate_health_score(&metrics); | |
| 239 | + let health_status = self.determine_health_status(health_score); | |
| 240 | + | |
| 241 | + // Determine minting multiplier based on health | |
| 242 | + let multiplier = match health_status { | |
| 243 | + HealthStatus::Excellent => self.minter.minting_rates.excellent_multiplier, | |
| 244 | + HealthStatus::Good => self.minter.minting_rates.good_multiplier, | |
| 245 | + HealthStatus::Fair => self.minter.minting_rates.fair_multiplier, | |
| 246 | + HealthStatus::Poor => self.minter.minting_rates.poor_multiplier, | |
| 247 | + HealthStatus::Emergency => 0.0, // No minting during emergency | |
| 248 | + }; | |
| 249 | + | |
| 250 | + // Calculate growth bonus | |
| 251 | + let growth_bonus = self.calculate_growth_bonus(&metrics)?; | |
| 252 | + let final_multiplier = multiplier * growth_bonus; | |
| 253 | + | |
| 254 | + // Calculate mint amount | |
| 255 | + let daily_base = metrics.total_capacity_gb * self.minter.minting_rates.base_rate_per_gb; | |
| 256 | + let mint_amount = (daily_base as f64 * final_multiplier) as u64; | |
| 257 | + | |
| 258 | + if mint_amount > 0 && matches!(health_status, HealthStatus::Excellent | HealthStatus::Good | HealthStatus::Fair) { | |
| 259 | + // Execute minting through token contract | |
| 260 | + let mint_event = self.zephyr_coin.mint("network_minter", "reward_pool", mint_amount)?; | |
| 261 | + self.events.push(mint_event); | |
| 262 | + | |
| 263 | + // Update token manager | |
| 264 | + self.token_manager.mint_rewards(mint_amount).await?; | |
| 265 | + | |
| 266 | + // Record performance snapshot | |
| 267 | + self.record_performance_snapshot(metrics, health_score, mint_amount, 0).await; | |
| 268 | + | |
| 269 | + tracing::info!("Minted {} tokens based on network health score: {:.2}%", | |
| 270 | + mint_amount, health_score); | |
| 271 | + | |
| 272 | + self.minter.last_operations.last_mint = Utc::now(); | |
| 273 | + return Ok(mint_amount); | |
| 274 | + } | |
| 275 | + | |
| 276 | + Ok(0) | |
| 277 | + } | |
| 278 | + | |
| 279 | + /// Calculate growth bonus multiplier | |
| 280 | + fn calculate_growth_bonus(&self, metrics: &NetworkHealthMetrics) -> Result<f64> { | |
| 281 | + if self.minter.performance_history.len() < 7 { | |
| 282 | + return Ok(1.0); // No bonus without sufficient history | |
| 283 | + } | |
| 284 | + | |
| 285 | + // Calculate 7-day growth rate | |
| 286 | + let current_capacity = metrics.total_capacity_gb; | |
| 287 | + let week_ago_capacity = self.minter.performance_history | |
| 288 | + .iter() | |
| 289 | + .rev() | |
| 290 | + .nth(6) | |
| 291 | + .map(|snapshot| snapshot.metrics.total_capacity_gb) | |
| 292 | + .unwrap_or(current_capacity); | |
| 293 | + | |
| 294 | + if week_ago_capacity == 0 { | |
| 295 | + return Ok(1.0); | |
| 296 | + } | |
| 297 | + | |
| 298 | + let growth_rate = (current_capacity as f64 - week_ago_capacity as f64) / week_ago_capacity as f64; | |
| 299 | + | |
| 300 | + // Apply growth bonus for healthy growth (10-30% weekly) | |
| 301 | + if growth_rate >= 0.1 && growth_rate <= 0.3 { | |
| 302 | + Ok(self.minter.minting_rates.growth_bonus_multiplier) | |
| 303 | + } else { | |
| 304 | + Ok(1.0) | |
| 305 | + } | |
| 306 | + } | |
| 307 | + | |
| 308 | + /// Execute health-based burning | |
| 309 | + pub async fn execute_health_based_burning(&mut self, metrics: NetworkHealthMetrics) -> Result<u64> { | |
| 310 | + let mut total_burned = 0u64; | |
| 311 | + | |
| 312 | + // Burn unused rewards | |
| 313 | + total_burned += self.burn_unused_rewards().await?; | |
| 314 | + | |
| 315 | + // Emergency burn if oversupply | |
| 316 | + total_burned += self.emergency_burn_oversupply(&metrics).await?; | |
| 317 | + | |
| 318 | + // Congestion burn | |
| 319 | + if self.minter.burning_policies.congestion_burn_enabled { | |
| 320 | + total_burned += self.burn_for_congestion(&metrics).await?; | |
| 321 | + } | |
| 322 | + | |
| 323 | + if total_burned > 0 { | |
| 324 | + self.minter.last_operations.last_burn = Utc::now(); | |
| 325 | + tracing::info!("Burned {} tokens based on network conditions", total_burned); | |
| 326 | + } | |
| 327 | + | |
| 328 | + Ok(total_burned) | |
| 329 | + } | |
| 330 | + | |
| 331 | + /// Burn unused reward tokens | |
| 332 | + async fn burn_unused_rewards(&mut self) -> Result<u64> { | |
| 333 | + let days_since_last = (Utc::now() - self.minter.last_operations.last_burn).num_days(); | |
| 334 | + | |
| 335 | + if days_since_last >= self.minter.burning_policies.unused_reward_burn_days as i64 { | |
| 336 | + let burn_amount = self.token_manager.burn_unused_tokens().await?; | |
| 337 | + | |
| 338 | + if burn_amount > 0 { | |
| 339 | + let burn_event = self.zephyr_coin.burn("network_burner", "reward_pool", burn_amount)?; | |
| 340 | + self.events.push(burn_event); | |
| 341 | + } | |
| 342 | + | |
| 343 | + return Ok(burn_amount); | |
| 344 | + } | |
| 345 | + | |
| 346 | + Ok(0) | |
| 347 | + } | |
| 348 | + | |
| 349 | + /// Emergency burn during token oversupply | |
| 350 | + async fn emergency_burn_oversupply(&mut self, _metrics: &NetworkHealthMetrics) -> Result<u64> { | |
| 351 | + let current_supply = self.zephyr_coin.total_supply; | |
| 352 | + let target_supply = 21_000_000 * 10_u64.pow(18); // 21M tokens | |
| 353 | + let oversupply_threshold = (target_supply as f64 * self.minter.burning_policies.emergency_burn_threshold) as u64; | |
| 354 | + | |
| 355 | + if current_supply > oversupply_threshold { | |
| 356 | + let excess = current_supply - target_supply; | |
| 357 | + let burn_amount = (excess as f64 * 0.1) as u64; // Burn 10% of excess | |
| 358 | + | |
| 359 | + let max_daily_burn = (current_supply as f64 * self.minter.burning_policies.max_daily_burn_percent / 100.0) as u64; | |
| 360 | + let final_burn = burn_amount.min(max_daily_burn); | |
| 361 | + | |
| 362 | + if final_burn > 0 { | |
| 363 | + let burn_event = self.zephyr_coin.burn("emergency_burner", "reward_pool", final_burn)?; | |
| 364 | + self.events.push(burn_event); | |
| 365 | + | |
| 366 | + tracing::warn!("Emergency burn of {} tokens due to oversupply", final_burn); | |
| 367 | + return Ok(final_burn); | |
| 368 | + } | |
| 369 | + } | |
| 370 | + | |
| 371 | + Ok(0) | |
| 372 | + } | |
| 373 | + | |
| 374 | + /// Burn tokens during network congestion | |
| 375 | + async fn burn_for_congestion(&mut self, metrics: &NetworkHealthMetrics) -> Result<u64> { | |
| 376 | + // Burn if utilization > 95% to incentivize capacity expansion | |
| 377 | + if metrics.utilization_rate > 95.0 { | |
| 378 | + let daily_rewards = metrics.total_capacity_gb * self.minter.minting_rates.base_rate_per_gb; | |
| 379 | + let congestion_burn = (daily_rewards as f64 * 0.05) as u64; // 5% of daily rewards | |
| 380 | + | |
| 381 | + if congestion_burn > 0 { | |
| 382 | + let burn_event = self.zephyr_coin.burn("congestion_burner", "reward_pool", congestion_burn)?; | |
| 383 | + self.events.push(burn_event); | |
| 384 | + | |
| 385 | + tracing::info!("Congestion burn of {} tokens (utilization: {:.1}%)", | |
| 386 | + congestion_burn, metrics.utilization_rate); | |
| 387 | + return Ok(congestion_burn); | |
| 388 | + } | |
| 389 | + } | |
| 390 | + | |
| 391 | + Ok(0) | |
| 392 | + } | |
| 393 | + | |
| 394 | + /// Record performance snapshot | |
| 395 | + async fn record_performance_snapshot( | |
| 396 | + &mut self, | |
| 397 | + metrics: NetworkHealthMetrics, | |
| 398 | + health_score: f64, | |
| 399 | + tokens_minted: u64, | |
| 400 | + tokens_burned: u64, | |
| 401 | + ) { | |
| 402 | + let snapshot = PerformanceSnapshot { | |
| 403 | + timestamp: Utc::now(), | |
| 404 | + metrics, | |
| 405 | + health_score, | |
| 406 | + tokens_minted, | |
| 407 | + tokens_burned, | |
| 408 | + active_rewards: self.token_manager.get_supply_status().reward_pool, | |
| 409 | + }; | |
| 410 | + | |
| 411 | + self.minter.performance_history.push(snapshot); | |
| 412 | + | |
| 413 | + // Keep only last 30 days of history | |
| 414 | + if self.minter.performance_history.len() > 30 { | |
| 415 | + self.minter.performance_history.remove(0); | |
| 416 | + } | |
| 417 | + } | |
| 418 | + | |
| 419 | + /// Run automated health monitoring loop | |
| 420 | + pub async fn run_health_monitor(&mut self, check_interval_hours: u64) -> Result<()> { | |
| 421 | + let mut interval = tokio::time::interval(TokioDuration::from_secs(check_interval_hours * 3600)); | |
| 422 | + | |
| 423 | + loop { | |
| 424 | + interval.tick().await; | |
| 425 | + | |
| 426 | + // Get current network metrics (would be fetched from network in real implementation) | |
| 427 | + let metrics = self.get_current_network_metrics().await?; | |
| 428 | + | |
| 429 | + // Update token manager with current metrics | |
| 430 | + self.token_manager.update_network_metrics(metrics.clone()); | |
| 431 | + | |
| 432 | + // Execute health-based operations | |
| 433 | + let minted = self.execute_health_based_minting(metrics.clone()).await?; | |
| 434 | + let burned = self.execute_health_based_burning(metrics.clone()).await?; | |
| 435 | + | |
| 436 | + // Perform token manager adjustments | |
| 437 | + self.token_manager.perform_supply_adjustment().await?; | |
| 438 | + | |
| 439 | + self.minter.last_operations.last_health_check = Utc::now(); | |
| 440 | + | |
| 441 | + tracing::info!("Health check complete: minted={}, burned={}", minted, burned); | |
| 442 | + } | |
| 443 | + } | |
| 444 | + | |
| 445 | + /// Get current network metrics (placeholder - would fetch from actual network) | |
| 446 | + async fn get_current_network_metrics(&self) -> Result<NetworkHealthMetrics> { | |
| 447 | + // This would fetch real metrics from the ZephyrFS network | |
| 448 | + Ok(NetworkHealthMetrics { | |
| 449 | + total_capacity_gb: 1000, | |
| 450 | + active_volunteers: 50, | |
| 451 | + utilization_rate: 75.0, | |
| 452 | + average_uptime: 96.5, | |
| 453 | + geographic_diversity: 65.0, | |
| 454 | + data_durability: 99.95, | |
| 455 | + }) | |
| 456 | + } | |
| 457 | + | |
| 458 | + /// Get recent events | |
| 459 | + pub fn get_recent_events(&self) -> &[TokenEvent] { | |
| 460 | + &self.events | |
| 461 | + } | |
| 462 | + | |
| 463 | + /// Get performance history | |
| 464 | + pub fn get_performance_history(&self) -> &[PerformanceSnapshot] { | |
| 465 | + &self.minter.performance_history | |
| 466 | + } | |
| 467 | +} | |
| 468 | + | |
| 469 | +#[cfg(test)] | |
| 470 | +mod tests { | |
| 471 | + use super::*; | |
| 472 | + use crate::economics::token_model::TokenEconomics; | |
| 473 | + | |
| 474 | + #[tokio::test] | |
| 475 | + async fn test_health_score_calculation() { | |
| 476 | + let minter = HealthBasedMinter::default(); | |
| 477 | + let token_manager = TokenEconomicsManager::new(TokenEconomics::default()); | |
| 478 | + let zephyr_coin = ZephyrCoin::new("test_owner".to_string(), 0); | |
| 479 | + let controller = NetworkHealthController::new(minter, token_manager, zephyr_coin); | |
| 480 | + | |
| 481 | + let metrics = NetworkHealthMetrics { | |
| 482 | + total_capacity_gb: 1000, | |
| 483 | + active_volunteers: 25, | |
| 484 | + utilization_rate: 75.0, | |
| 485 | + average_uptime: 96.0, | |
| 486 | + geographic_diversity: 60.0, | |
| 487 | + data_durability: 99.9, | |
| 488 | + }; | |
| 489 | + | |
| 490 | + let health_score = controller.calculate_health_score(&metrics); | |
| 491 | + assert!(health_score >= 80.0); // Should be "Good" or better | |
| 492 | + assert!(health_score <= 100.0); | |
| 493 | + } | |
| 494 | + | |
| 495 | + #[tokio::test] | |
| 496 | + async fn test_health_based_minting() { | |
| 497 | + let minter = HealthBasedMinter::default(); | |
| 498 | + let token_manager = TokenEconomicsManager::new(TokenEconomics::default()); | |
| 499 | + let mut zephyr_coin = ZephyrCoin::new("test_owner".to_string(), 0); | |
| 500 | + zephyr_coin.add_minter("test_owner", "network_minter").unwrap(); | |
| 501 | + | |
| 502 | + let mut controller = NetworkHealthController::new(minter, token_manager, zephyr_coin); | |
| 503 | + | |
| 504 | + let metrics = NetworkHealthMetrics { | |
| 505 | + total_capacity_gb: 100, | |
| 506 | + active_volunteers: 15, | |
| 507 | + utilization_rate: 75.0, | |
| 508 | + average_uptime: 98.0, | |
| 509 | + geographic_diversity: 70.0, | |
| 510 | + data_durability: 99.95, | |
| 511 | + }; | |
| 512 | + | |
| 513 | + let minted = controller.execute_health_based_minting(metrics).await.unwrap(); | |
| 514 | + assert!(minted > 0); | |
| 515 | + } | |
| 516 | +} | |
src/economics/payment_processor.rsadded@@ -0,0 +1,822 @@ | ||
| 1 | +//! Multi-Currency Payment Processing Engine | |
| 2 | +//! | |
| 3 | +//! Handles payouts in multiple currencies for ZephyrFS volunteers | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::{HashMap, VecDeque}; | |
| 8 | +use chrono::{DateTime, Utc, Duration}; | |
| 9 | + | |
| 10 | +/// Multi-currency payment processor | |
| 11 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 12 | +pub struct PaymentProcessor { | |
| 13 | + /// Supported payment methods | |
| 14 | + pub payment_methods: HashMap<PaymentMethod, PaymentMethodConfig>, | |
| 15 | + /// Exchange rates for currency conversion | |
| 16 | + pub exchange_rates: HashMap<Currency, f64>, | |
| 17 | + /// Payment processing fees | |
| 18 | + pub fee_structure: PaymentFeeStructure, | |
| 19 | + /// Pending payments queue | |
| 20 | + pub pending_payments: VecDeque<PaymentRequest>, | |
| 21 | + /// Payment history | |
| 22 | + pub payment_history: HashMap<String, Vec<PaymentRecord>>, | |
| 23 | + /// Minimum payout thresholds | |
| 24 | + pub min_payout_thresholds: HashMap<Currency, u64>, | |
| 25 | + /// Processor configuration | |
| 26 | + pub config: ProcessorConfig, | |
| 27 | +} | |
| 28 | + | |
| 29 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 30 | +pub enum PaymentMethod { | |
| 31 | + Cryptocurrency(CryptoNetwork), | |
| 32 | + BankTransfer(BankTransferType), | |
| 33 | + DigitalWallet(WalletProvider), | |
| 34 | + StableCoin(StableCoinType), | |
| 35 | +} | |
| 36 | + | |
| 37 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 38 | +pub enum CryptoNetwork { | |
| 39 | + Bitcoin, | |
| 40 | + Ethereum, | |
| 41 | + Polygon, | |
| 42 | + BinanceSmartChain, | |
| 43 | + Solana, | |
| 44 | + Cardano, | |
| 45 | + ZephyrCoin, // Native token | |
| 46 | +} | |
| 47 | + | |
| 48 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 49 | +pub enum BankTransferType { | |
| 50 | + ACH, // US | |
| 51 | + SEPA, // Europe | |
| 52 | + FasterPayments, // UK | |
| 53 | + Interac, // Canada | |
| 54 | + PIX, // Brazil | |
| 55 | + UPI, // India | |
| 56 | +} | |
| 57 | + | |
| 58 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 59 | +pub enum WalletProvider { | |
| 60 | + PayPal, | |
| 61 | + Wise, | |
| 62 | + Revolut, | |
| 63 | + CashApp, | |
| 64 | + Venmo, | |
| 65 | + Zelle, | |
| 66 | +} | |
| 67 | + | |
| 68 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 69 | +pub enum StableCoinType { | |
| 70 | + USDC, | |
| 71 | + USDT, | |
| 72 | + DAI, | |
| 73 | + BUSD, | |
| 74 | +} | |
| 75 | + | |
| 76 | +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] | |
| 77 | +pub enum Currency { | |
| 78 | + USD, | |
| 79 | + EUR, | |
| 80 | + GBP, | |
| 81 | + CAD, | |
| 82 | + AUD, | |
| 83 | + BRL, | |
| 84 | + INR, | |
| 85 | + JPY, | |
| 86 | + ZephyrCoin, | |
| 87 | + Bitcoin, | |
| 88 | + Ethereum, | |
| 89 | +} | |
| 90 | + | |
| 91 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 92 | +pub struct PaymentMethodConfig { | |
| 93 | + pub enabled: bool, | |
| 94 | + pub min_amount: u64, | |
| 95 | + pub max_amount: u64, | |
| 96 | + pub processing_time_hours: u32, | |
| 97 | + pub supported_currencies: Vec<Currency>, | |
| 98 | + pub geographic_restrictions: Vec<String>, // ISO country codes | |
| 99 | + pub requires_kyc: bool, | |
| 100 | + pub fee_structure: MethodFeeStructure, | |
| 101 | +} | |
| 102 | + | |
| 103 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 104 | +pub struct MethodFeeStructure { | |
| 105 | + pub fixed_fee: u64, // Fixed fee in cents/wei | |
| 106 | + pub percentage_fee: f64, // Percentage fee (0.01 = 1%) | |
| 107 | + pub network_fee: u64, // Blockchain network fees | |
| 108 | + pub exchange_fee: f64, // Currency conversion fee | |
| 109 | +} | |
| 110 | + | |
| 111 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 112 | +pub struct PaymentFeeStructure { | |
| 113 | + /// Base processing fee (0.5%) | |
| 114 | + pub base_processing_fee: f64, | |
| 115 | + /// Currency conversion fees | |
| 116 | + pub conversion_fees: HashMap<Currency, f64>, | |
| 117 | + /// Express processing fee (1%) | |
| 118 | + pub express_fee: f64, | |
| 119 | + /// KYC verification fee | |
| 120 | + pub kyc_fee: u64, | |
| 121 | +} | |
| 122 | + | |
| 123 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 124 | +pub struct PaymentRequest { | |
| 125 | + pub request_id: String, | |
| 126 | + pub volunteer_id: String, | |
| 127 | + pub amount_tokens: u64, | |
| 128 | + pub target_currency: Currency, | |
| 129 | + pub payment_method: PaymentMethod, | |
| 130 | + pub recipient_info: RecipientInfo, | |
| 131 | + pub priority: PaymentPriority, | |
| 132 | + pub created_at: DateTime<Utc>, | |
| 133 | + pub scheduled_for: Option<DateTime<Utc>>, | |
| 134 | + pub metadata: HashMap<String, String>, | |
| 135 | +} | |
| 136 | + | |
| 137 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 138 | +pub struct RecipientInfo { | |
| 139 | + pub wallet_address: Option<String>, | |
| 140 | + pub bank_account: Option<BankAccountInfo>, | |
| 141 | + pub digital_wallet: Option<DigitalWalletInfo>, | |
| 142 | + pub kyc_verified: bool, | |
| 143 | + pub tax_info: Option<TaxInfo>, | |
| 144 | +} | |
| 145 | + | |
| 146 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 147 | +pub struct BankAccountInfo { | |
| 148 | + pub account_holder: String, | |
| 149 | + pub account_number: String, | |
| 150 | + pub routing_number: String, | |
| 151 | + pub bank_name: String, | |
| 152 | + pub swift_code: Option<String>, | |
| 153 | + pub iban: Option<String>, | |
| 154 | + pub country_code: String, | |
| 155 | +} | |
| 156 | + | |
| 157 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 158 | +pub struct DigitalWalletInfo { | |
| 159 | + pub provider: WalletProvider, | |
| 160 | + pub wallet_id: String, | |
| 161 | + pub verified: bool, | |
| 162 | +} | |
| 163 | + | |
| 164 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 165 | +pub struct TaxInfo { | |
| 166 | + pub tax_id: String, | |
| 167 | + pub tax_country: String, | |
| 168 | + pub tax_exempt: bool, | |
| 169 | + pub withholding_rate: f64, | |
| 170 | +} | |
| 171 | + | |
| 172 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 173 | +pub enum PaymentPriority { | |
| 174 | + Standard, | |
| 175 | + Express, | |
| 176 | + Immediate, | |
| 177 | +} | |
| 178 | + | |
| 179 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 180 | +pub struct PaymentRecord { | |
| 181 | + pub payment_id: String, | |
| 182 | + pub request_id: String, | |
| 183 | + pub volunteer_id: String, | |
| 184 | + pub amount_tokens: u64, | |
| 185 | + pub amount_paid: u64, | |
| 186 | + pub currency: Currency, | |
| 187 | + pub payment_method: PaymentMethod, | |
| 188 | + pub status: PaymentStatus, | |
| 189 | + pub fees_paid: u64, | |
| 190 | + pub exchange_rate: f64, | |
| 191 | + pub processed_at: DateTime<Utc>, | |
| 192 | + pub confirmation_hash: Option<String>, | |
| 193 | + pub error_message: Option<String>, | |
| 194 | +} | |
| 195 | + | |
| 196 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 197 | +pub enum PaymentStatus { | |
| 198 | + Pending, | |
| 199 | + Processing, | |
| 200 | + Completed, | |
| 201 | + Failed, | |
| 202 | + Cancelled, | |
| 203 | + RequiresAction, | |
| 204 | +} | |
| 205 | + | |
| 206 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 207 | +pub struct ProcessorConfig { | |
| 208 | + pub batch_processing_enabled: bool, | |
| 209 | + pub batch_size: usize, | |
| 210 | + pub processing_interval_minutes: u32, | |
| 211 | + pub max_retries: u32, | |
| 212 | + pub retry_delay_minutes: u32, | |
| 213 | + pub auto_currency_conversion: bool, | |
| 214 | + pub fraud_detection_enabled: bool, | |
| 215 | +} | |
| 216 | + | |
| 217 | +impl Default for PaymentFeeStructure { | |
| 218 | + fn default() -> Self { | |
| 219 | + let mut conversion_fees = HashMap::new(); | |
| 220 | + conversion_fees.insert(Currency::USD, 0.005); | |
| 221 | + conversion_fees.insert(Currency::EUR, 0.005); | |
| 222 | + conversion_fees.insert(Currency::GBP, 0.005); | |
| 223 | + conversion_fees.insert(Currency::Bitcoin, 0.01); | |
| 224 | + conversion_fees.insert(Currency::Ethereum, 0.008); | |
| 225 | + conversion_fees.insert(Currency::ZephyrCoin, 0.0); | |
| 226 | + | |
| 227 | + Self { | |
| 228 | + base_processing_fee: 0.005, // 0.5% | |
| 229 | + conversion_fees, | |
| 230 | + express_fee: 0.01, // 1% | |
| 231 | + kyc_fee: 500, // $5 equivalent | |
| 232 | + } | |
| 233 | + } | |
| 234 | +} | |
| 235 | + | |
| 236 | +impl Default for ProcessorConfig { | |
| 237 | + fn default() -> Self { | |
| 238 | + Self { | |
| 239 | + batch_processing_enabled: true, | |
| 240 | + batch_size: 100, | |
| 241 | + processing_interval_minutes: 30, | |
| 242 | + max_retries: 3, | |
| 243 | + retry_delay_minutes: 60, | |
| 244 | + auto_currency_conversion: true, | |
| 245 | + fraud_detection_enabled: true, | |
| 246 | + } | |
| 247 | + } | |
| 248 | +} | |
| 249 | + | |
| 250 | +impl PaymentProcessor { | |
| 251 | + /// Create new payment processor | |
| 252 | + pub fn new() -> Self { | |
| 253 | + let mut processor = Self { | |
| 254 | + payment_methods: HashMap::new(), | |
| 255 | + exchange_rates: HashMap::new(), | |
| 256 | + fee_structure: PaymentFeeStructure::default(), | |
| 257 | + pending_payments: VecDeque::new(), | |
| 258 | + payment_history: HashMap::new(), | |
| 259 | + min_payout_thresholds: HashMap::new(), | |
| 260 | + config: ProcessorConfig::default(), | |
| 261 | + }; | |
| 262 | + | |
| 263 | + processor.initialize_payment_methods(); | |
| 264 | + processor.initialize_exchange_rates(); | |
| 265 | + processor.initialize_payout_thresholds(); | |
| 266 | + | |
| 267 | + processor | |
| 268 | + } | |
| 269 | + | |
| 270 | + /// Initialize supported payment methods | |
| 271 | + fn initialize_payment_methods(&mut self) { | |
| 272 | + // Cryptocurrency methods | |
| 273 | + self.payment_methods.insert( | |
| 274 | + PaymentMethod::Cryptocurrency(CryptoNetwork::ZephyrCoin), | |
| 275 | + PaymentMethodConfig { | |
| 276 | + enabled: true, | |
| 277 | + min_amount: 1_000_000_000_000_000_000, // 1 ZEPH | |
| 278 | + max_amount: 1_000_000 * 1_000_000_000_000_000_000, // 1M ZEPH | |
| 279 | + processing_time_hours: 1, | |
| 280 | + supported_currencies: vec![Currency::ZephyrCoin], | |
| 281 | + geographic_restrictions: vec![], | |
| 282 | + requires_kyc: false, | |
| 283 | + fee_structure: MethodFeeStructure { | |
| 284 | + fixed_fee: 0, | |
| 285 | + percentage_fee: 0.0, | |
| 286 | + network_fee: 100_000_000_000_000, // 0.0001 ZEPH | |
| 287 | + exchange_fee: 0.0, | |
| 288 | + }, | |
| 289 | + }, | |
| 290 | + ); | |
| 291 | + | |
| 292 | + self.payment_methods.insert( | |
| 293 | + PaymentMethod::Cryptocurrency(CryptoNetwork::Ethereum), | |
| 294 | + PaymentMethodConfig { | |
| 295 | + enabled: true, | |
| 296 | + min_amount: 10_000_000_000_000_000, // 0.01 ETH | |
| 297 | + max_amount: 1000 * 1_000_000_000_000_000_000, // 1000 ETH | |
| 298 | + processing_time_hours: 1, | |
| 299 | + supported_currencies: vec![Currency::Ethereum, Currency::USD], | |
| 300 | + geographic_restrictions: vec![], | |
| 301 | + requires_kyc: false, | |
| 302 | + fee_structure: MethodFeeStructure { | |
| 303 | + fixed_fee: 0, | |
| 304 | + percentage_fee: 0.003, | |
| 305 | + network_fee: 5_000_000_000_000_000, // ~$10 gas fee | |
| 306 | + exchange_fee: 0.005, | |
| 307 | + }, | |
| 308 | + }, | |
| 309 | + ); | |
| 310 | + | |
| 311 | + // Bank transfer methods | |
| 312 | + self.payment_methods.insert( | |
| 313 | + PaymentMethod::BankTransfer(BankTransferType::ACH), | |
| 314 | + PaymentMethodConfig { | |
| 315 | + enabled: true, | |
| 316 | + min_amount: 1000, // $10 | |
| 317 | + max_amount: 1_000_000_00, // $10,000 | |
| 318 | + processing_time_hours: 48, | |
| 319 | + supported_currencies: vec![Currency::USD], | |
| 320 | + geographic_restrictions: vec!["US".to_string()], | |
| 321 | + requires_kyc: true, | |
| 322 | + fee_structure: MethodFeeStructure { | |
| 323 | + fixed_fee: 100, // $1 | |
| 324 | + percentage_fee: 0.001, | |
| 325 | + network_fee: 0, | |
| 326 | + exchange_fee: 0.005, | |
| 327 | + }, | |
| 328 | + }, | |
| 329 | + ); | |
| 330 | + | |
| 331 | + self.payment_methods.insert( | |
| 332 | + PaymentMethod::BankTransfer(BankTransferType::SEPA), | |
| 333 | + PaymentMethodConfig { | |
| 334 | + enabled: true, | |
| 335 | + min_amount: 1000, // €10 | |
| 336 | + max_amount: 1_000_000_00, // €10,000 | |
| 337 | + processing_time_hours: 24, | |
| 338 | + supported_currencies: vec![Currency::EUR], | |
| 339 | + geographic_restrictions: vec!["EU".to_string()], | |
| 340 | + requires_kyc: true, | |
| 341 | + fee_structure: MethodFeeStructure { | |
| 342 | + fixed_fee: 50, // €0.50 | |
| 343 | + percentage_fee: 0.001, | |
| 344 | + network_fee: 0, | |
| 345 | + exchange_fee: 0.005, | |
| 346 | + }, | |
| 347 | + }, | |
| 348 | + ); | |
| 349 | + | |
| 350 | + // Digital wallet methods | |
| 351 | + self.payment_methods.insert( | |
| 352 | + PaymentMethod::DigitalWallet(WalletProvider::PayPal), | |
| 353 | + PaymentMethodConfig { | |
| 354 | + enabled: true, | |
| 355 | + min_amount: 500, // $5 | |
| 356 | + max_amount: 500_000_00, // $5,000 | |
| 357 | + processing_time_hours: 2, | |
| 358 | + supported_currencies: vec![Currency::USD, Currency::EUR, Currency::GBP], | |
| 359 | + geographic_restrictions: vec![], // Global | |
| 360 | + requires_kyc: true, | |
| 361 | + fee_structure: MethodFeeStructure { | |
| 362 | + fixed_fee: 30, // $0.30 | |
| 363 | + percentage_fee: 0.029, // 2.9% | |
| 364 | + network_fee: 0, | |
| 365 | + exchange_fee: 0.035, // 3.5% for currency conversion | |
| 366 | + }, | |
| 367 | + }, | |
| 368 | + ); | |
| 369 | + | |
| 370 | + // Stablecoin methods | |
| 371 | + self.payment_methods.insert( | |
| 372 | + PaymentMethod::StableCoin(StableCoinType::USDC), | |
| 373 | + PaymentMethodConfig { | |
| 374 | + enabled: true, | |
| 375 | + min_amount: 1_000_000, // 1 USDC | |
| 376 | + max_amount: 100_000 * 1_000_000, // 100k USDC | |
| 377 | + processing_time_hours: 1, | |
| 378 | + supported_currencies: vec![Currency::USD], | |
| 379 | + geographic_restrictions: vec![], | |
| 380 | + requires_kyc: false, | |
| 381 | + fee_structure: MethodFeeStructure { | |
| 382 | + fixed_fee: 0, | |
| 383 | + percentage_fee: 0.001, | |
| 384 | + network_fee: 2_000_000_000_000_000, // ~$2 gas fee | |
| 385 | + exchange_fee: 0.001, | |
| 386 | + }, | |
| 387 | + }, | |
| 388 | + ); | |
| 389 | + } | |
| 390 | + | |
| 391 | + /// Initialize exchange rates (would fetch from APIs in production) | |
| 392 | + fn initialize_exchange_rates(&mut self) { | |
| 393 | + self.exchange_rates.insert(Currency::ZephyrCoin, 0.10); // $0.10 per ZEPH | |
| 394 | + self.exchange_rates.insert(Currency::USD, 1.0); | |
| 395 | + self.exchange_rates.insert(Currency::EUR, 0.85); | |
| 396 | + self.exchange_rates.insert(Currency::GBP, 0.73); | |
| 397 | + self.exchange_rates.insert(Currency::Bitcoin, 45000.0); | |
| 398 | + self.exchange_rates.insert(Currency::Ethereum, 2500.0); | |
| 399 | + } | |
| 400 | + | |
| 401 | + /// Initialize minimum payout thresholds | |
| 402 | + fn initialize_payout_thresholds(&mut self) { | |
| 403 | + self.min_payout_thresholds.insert(Currency::ZephyrCoin, 10 * 1_000_000_000_000_000_000); // 10 ZEPH | |
| 404 | + self.min_payout_thresholds.insert(Currency::USD, 1000); // $10 | |
| 405 | + self.min_payout_thresholds.insert(Currency::EUR, 850); // €8.50 | |
| 406 | + self.min_payout_thresholds.insert(Currency::Bitcoin, 22222); // ~$10 worth | |
| 407 | + self.min_payout_thresholds.insert(Currency::Ethereum, 400000); // ~$10 worth | |
| 408 | + } | |
| 409 | + | |
| 410 | + /// Submit payment request | |
| 411 | + pub fn submit_payment_request(&mut self, mut request: PaymentRequest) -> Result<String> { | |
| 412 | + // Validate payment method | |
| 413 | + let method_config = self.payment_methods.get(&request.payment_method) | |
| 414 | + .ok_or_else(|| anyhow::anyhow!("Payment method not supported"))?; | |
| 415 | + | |
| 416 | + if !method_config.enabled { | |
| 417 | + return Err(anyhow::anyhow!("Payment method temporarily disabled")); | |
| 418 | + } | |
| 419 | + | |
| 420 | + // Convert amount to target currency | |
| 421 | + let amount_in_currency = self.convert_tokens_to_currency( | |
| 422 | + request.amount_tokens, | |
| 423 | + &request.target_currency, | |
| 424 | + )?; | |
| 425 | + | |
| 426 | + // Check minimum threshold | |
| 427 | + if let Some(&min_threshold) = self.min_payout_thresholds.get(&request.target_currency) { | |
| 428 | + if amount_in_currency < min_threshold { | |
| 429 | + return Err(anyhow::anyhow!( | |
| 430 | + "Amount below minimum payout threshold: {} < {}", | |
| 431 | + amount_in_currency, min_threshold | |
| 432 | + )); | |
| 433 | + } | |
| 434 | + } | |
| 435 | + | |
| 436 | + // Check method limits | |
| 437 | + if amount_in_currency < method_config.min_amount || amount_in_currency > method_config.max_amount { | |
| 438 | + return Err(anyhow::anyhow!( | |
| 439 | + "Amount outside payment method limits: {} not in [{}, {}]", | |
| 440 | + amount_in_currency, method_config.min_amount, method_config.max_amount | |
| 441 | + )); | |
| 442 | + } | |
| 443 | + | |
| 444 | + // Validate recipient info | |
| 445 | + self.validate_recipient_info(&request.recipient_info, &request.payment_method)?; | |
| 446 | + | |
| 447 | + // Generate request ID | |
| 448 | + request.request_id = format!("pay_{}_{}", | |
| 449 | + chrono::Utc::now().timestamp(), | |
| 450 | + &request.volunteer_id[..8] | |
| 451 | + ); | |
| 452 | + | |
| 453 | + self.pending_payments.push_back(request.clone()); | |
| 454 | + | |
| 455 | + tracing::info!("Payment request submitted: {} for {} tokens", | |
| 456 | + request.request_id, request.amount_tokens); | |
| 457 | + | |
| 458 | + Ok(request.request_id) | |
| 459 | + } | |
| 460 | + | |
| 461 | + /// Convert tokens to target currency | |
| 462 | + fn convert_tokens_to_currency(&self, tokens: u64, target_currency: &Currency) -> Result<u64> { | |
| 463 | + if *target_currency == Currency::ZephyrCoin { | |
| 464 | + return Ok(tokens); | |
| 465 | + } | |
| 466 | + | |
| 467 | + let zeph_rate = self.exchange_rates.get(&Currency::ZephyrCoin) | |
| 468 | + .ok_or_else(|| anyhow::anyhow!("ZephyrCoin exchange rate not available"))?; | |
| 469 | + | |
| 470 | + let target_rate = self.exchange_rates.get(target_currency) | |
| 471 | + .ok_or_else(|| anyhow::anyhow!("Target currency exchange rate not available"))?; | |
| 472 | + | |
| 473 | + // Convert tokens to USD value, then to target currency | |
| 474 | + let usd_value = (tokens as f64 / 1_000_000_000_000_000_000.0) * zeph_rate; | |
| 475 | + let target_value = usd_value / target_rate; | |
| 476 | + | |
| 477 | + // Convert to smallest units (cents, wei, etc.) | |
| 478 | + let target_amount = match target_currency { | |
| 479 | + Currency::USD | Currency::EUR | Currency::GBP => (target_value * 100.0) as u64, | |
| 480 | + Currency::Bitcoin => (target_value * 100_000_000.0) as u64, // Satoshis | |
| 481 | + Currency::Ethereum => (target_value * 1_000_000_000_000_000_000.0) as u64, // Wei | |
| 482 | + _ => (target_value * 1_000_000.0) as u64, // Default 6 decimals | |
| 483 | + }; | |
| 484 | + | |
| 485 | + Ok(target_amount) | |
| 486 | + } | |
| 487 | + | |
| 488 | + /// Validate recipient information | |
| 489 | + fn validate_recipient_info(&self, info: &RecipientInfo, method: &PaymentMethod) -> Result<()> { | |
| 490 | + match method { | |
| 491 | + PaymentMethod::Cryptocurrency(_) => { | |
| 492 | + if info.wallet_address.is_none() { | |
| 493 | + return Err(anyhow::anyhow!("Wallet address required for crypto payments")); | |
| 494 | + } | |
| 495 | + }, | |
| 496 | + PaymentMethod::BankTransfer(_) => { | |
| 497 | + if info.bank_account.is_none() { | |
| 498 | + return Err(anyhow::anyhow!("Bank account info required for bank transfers")); | |
| 499 | + } | |
| 500 | + if !info.kyc_verified { | |
| 501 | + return Err(anyhow::anyhow!("KYC verification required for bank transfers")); | |
| 502 | + } | |
| 503 | + }, | |
| 504 | + PaymentMethod::DigitalWallet(_) => { | |
| 505 | + if info.digital_wallet.is_none() { | |
| 506 | + return Err(anyhow::anyhow!("Digital wallet info required")); | |
| 507 | + } | |
| 508 | + }, | |
| 509 | + PaymentMethod::StableCoin(_) => { | |
| 510 | + if info.wallet_address.is_none() { | |
| 511 | + return Err(anyhow::anyhow!("Wallet address required for stablecoin payments")); | |
| 512 | + } | |
| 513 | + }, | |
| 514 | + } | |
| 515 | + | |
| 516 | + Ok(()) | |
| 517 | + } | |
| 518 | + | |
| 519 | + /// Process pending payments | |
| 520 | + pub async fn process_pending_payments(&mut self) -> Result<Vec<PaymentRecord>> { | |
| 521 | + let mut processed = Vec::new(); | |
| 522 | + let batch_size = if self.config.batch_processing_enabled { | |
| 523 | + self.config.batch_size | |
| 524 | + } else { | |
| 525 | + 1 | |
| 526 | + }; | |
| 527 | + | |
| 528 | + for _ in 0..batch_size { | |
| 529 | + if let Some(request) = self.pending_payments.pop_front() { | |
| 530 | + // Check if scheduled for future | |
| 531 | + if let Some(scheduled_time) = request.scheduled_for { | |
| 532 | + if Utc::now() < scheduled_time { | |
| 533 | + // Put back in queue | |
| 534 | + self.pending_payments.push_front(request); | |
| 535 | + break; | |
| 536 | + } | |
| 537 | + } | |
| 538 | + | |
| 539 | + match self.process_single_payment(request).await { | |
| 540 | + Ok(record) => { | |
| 541 | + processed.push(record.clone()); | |
| 542 | + | |
| 543 | + // Add to history | |
| 544 | + self.payment_history | |
| 545 | + .entry(record.volunteer_id.clone()) | |
| 546 | + .or_insert_with(Vec::new) | |
| 547 | + .push(record); | |
| 548 | + }, | |
| 549 | + Err(e) => { | |
| 550 | + tracing::error!("Payment processing failed: {}", e); | |
| 551 | + // Could implement retry logic here | |
| 552 | + } | |
| 553 | + } | |
| 554 | + } else { | |
| 555 | + break; | |
| 556 | + } | |
| 557 | + } | |
| 558 | + | |
| 559 | + Ok(processed) | |
| 560 | + } | |
| 561 | + | |
| 562 | + /// Process single payment | |
| 563 | + async fn process_single_payment(&self, request: PaymentRequest) -> Result<PaymentRecord> { | |
| 564 | + let payment_id = format!("tx_{}_{}", | |
| 565 | + chrono::Utc::now().timestamp_millis(), | |
| 566 | + &request.volunteer_id[..6] | |
| 567 | + ); | |
| 568 | + | |
| 569 | + // Calculate fees | |
| 570 | + let fees = self.calculate_payment_fees(&request)?; | |
| 571 | + | |
| 572 | + // Convert amount | |
| 573 | + let amount_in_currency = self.convert_tokens_to_currency( | |
| 574 | + request.amount_tokens, | |
| 575 | + &request.target_currency, | |
| 576 | + )?; | |
| 577 | + | |
| 578 | + let net_amount = amount_in_currency.saturating_sub(fees.total_fee); | |
| 579 | + | |
| 580 | + // Execute payment based on method | |
| 581 | + let (status, confirmation_hash, error_message) = match request.payment_method { | |
| 582 | + PaymentMethod::Cryptocurrency(_) => { | |
| 583 | + self.execute_crypto_payment(&request, net_amount).await? | |
| 584 | + }, | |
| 585 | + PaymentMethod::BankTransfer(_) => { | |
| 586 | + self.execute_bank_transfer(&request, net_amount).await? | |
| 587 | + }, | |
| 588 | + PaymentMethod::DigitalWallet(_) => { | |
| 589 | + self.execute_wallet_payment(&request, net_amount).await? | |
| 590 | + }, | |
| 591 | + PaymentMethod::StableCoin(_) => { | |
| 592 | + self.execute_stablecoin_payment(&request, net_amount).await? | |
| 593 | + }, | |
| 594 | + }; | |
| 595 | + | |
| 596 | + let record = PaymentRecord { | |
| 597 | + payment_id, | |
| 598 | + request_id: request.request_id, | |
| 599 | + volunteer_id: request.volunteer_id, | |
| 600 | + amount_tokens: request.amount_tokens, | |
| 601 | + amount_paid: net_amount, | |
| 602 | + currency: request.target_currency, | |
| 603 | + payment_method: request.payment_method, | |
| 604 | + status, | |
| 605 | + fees_paid: fees.total_fee, | |
| 606 | + exchange_rate: self.get_exchange_rate(&Currency::ZephyrCoin, &request.target_currency)?, | |
| 607 | + processed_at: Utc::now(), | |
| 608 | + confirmation_hash, | |
| 609 | + error_message, | |
| 610 | + }; | |
| 611 | + | |
| 612 | + tracing::info!("Payment processed: {} - {} {:?}", | |
| 613 | + record.payment_id, record.amount_paid, record.currency); | |
| 614 | + | |
| 615 | + Ok(record) | |
| 616 | + } | |
| 617 | + | |
| 618 | + /// Calculate payment fees | |
| 619 | + fn calculate_payment_fees(&self, request: &PaymentRequest) -> Result<PaymentFees> { | |
| 620 | + let method_config = self.payment_methods.get(&request.payment_method) | |
| 621 | + .ok_or_else(|| anyhow::anyhow!("Payment method not found"))?; | |
| 622 | + | |
| 623 | + let amount_in_currency = self.convert_tokens_to_currency( | |
| 624 | + request.amount_tokens, | |
| 625 | + &request.target_currency, | |
| 626 | + )?; | |
| 627 | + | |
| 628 | + let base_fee = (amount_in_currency as f64 * self.fee_structure.base_processing_fee) as u64; | |
| 629 | + let method_percentage_fee = (amount_in_currency as f64 * method_config.fee_structure.percentage_fee) as u64; | |
| 630 | + let method_fixed_fee = method_config.fee_structure.fixed_fee; | |
| 631 | + let network_fee = method_config.fee_structure.network_fee; | |
| 632 | + | |
| 633 | + let exchange_fee = if request.target_currency != Currency::ZephyrCoin { | |
| 634 | + let exchange_rate = method_config.fee_structure.exchange_fee; | |
| 635 | + (amount_in_currency as f64 * exchange_rate) as u64 | |
| 636 | + } else { | |
| 637 | + 0 | |
| 638 | + }; | |
| 639 | + | |
| 640 | + let express_fee = if matches!(request.priority, PaymentPriority::Express | PaymentPriority::Immediate) { | |
| 641 | + (amount_in_currency as f64 * self.fee_structure.express_fee) as u64 | |
| 642 | + } else { | |
| 643 | + 0 | |
| 644 | + }; | |
| 645 | + | |
| 646 | + let total_fee = base_fee + method_percentage_fee + method_fixed_fee + network_fee + exchange_fee + express_fee; | |
| 647 | + | |
| 648 | + Ok(PaymentFees { | |
| 649 | + base_fee, | |
| 650 | + method_percentage_fee, | |
| 651 | + method_fixed_fee, | |
| 652 | + network_fee, | |
| 653 | + exchange_fee, | |
| 654 | + express_fee, | |
| 655 | + total_fee, | |
| 656 | + }) | |
| 657 | + } | |
| 658 | + | |
| 659 | + /// Execute cryptocurrency payment | |
| 660 | + async fn execute_crypto_payment( | |
| 661 | + &self, | |
| 662 | + request: &PaymentRequest, | |
| 663 | + amount: u64, | |
| 664 | + ) -> Result<(PaymentStatus, Option<String>, Option<String>)> { | |
| 665 | + // Simulate crypto transaction | |
| 666 | + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; | |
| 667 | + | |
| 668 | + let confirmation_hash = format!("0x{:x}", rand::random::<u64>()); | |
| 669 | + | |
| 670 | + Ok((PaymentStatus::Completed, Some(confirmation_hash), None)) | |
| 671 | + } | |
| 672 | + | |
| 673 | + /// Execute bank transfer | |
| 674 | + async fn execute_bank_transfer( | |
| 675 | + &self, | |
| 676 | + request: &PaymentRequest, | |
| 677 | + amount: u64, | |
| 678 | + ) -> Result<(PaymentStatus, Option<String>, Option<String>)> { | |
| 679 | + // Simulate bank transfer processing | |
| 680 | + tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; | |
| 681 | + | |
| 682 | + let reference = format!("TXN{}", chrono::Utc::now().timestamp()); | |
| 683 | + | |
| 684 | + Ok((PaymentStatus::Processing, Some(reference), None)) | |
| 685 | + } | |
| 686 | + | |
| 687 | + /// Execute digital wallet payment | |
| 688 | + async fn execute_wallet_payment( | |
| 689 | + &self, | |
| 690 | + request: &PaymentRequest, | |
| 691 | + amount: u64, | |
| 692 | + ) -> Result<(PaymentStatus, Option<String>, Option<String>)> { | |
| 693 | + // Simulate wallet payment | |
| 694 | + tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; | |
| 695 | + | |
| 696 | + let transaction_id = format!("WLT{}", chrono::Utc::now().timestamp()); | |
| 697 | + | |
| 698 | + Ok((PaymentStatus::Completed, Some(transaction_id), None)) | |
| 699 | + } | |
| 700 | + | |
| 701 | + /// Execute stablecoin payment | |
| 702 | + async fn execute_stablecoin_payment( | |
| 703 | + &self, | |
| 704 | + request: &PaymentRequest, | |
| 705 | + amount: u64, | |
| 706 | + ) -> Result<(PaymentStatus, Option<String>, Option<String>)> { | |
| 707 | + // Simulate stablecoin transfer | |
| 708 | + tokio::time::sleep(tokio::time::Duration::from_millis(150)).await; | |
| 709 | + | |
| 710 | + let tx_hash = format!("0x{:x}", rand::random::<u64>()); | |
| 711 | + | |
| 712 | + Ok((PaymentStatus::Completed, Some(tx_hash), None)) | |
| 713 | + } | |
| 714 | + | |
| 715 | + /// Get exchange rate between currencies | |
| 716 | + fn get_exchange_rate(&self, from: &Currency, to: &Currency) -> Result<f64> { | |
| 717 | + if from == to { | |
| 718 | + return Ok(1.0); | |
| 719 | + } | |
| 720 | + | |
| 721 | + let from_rate = self.exchange_rates.get(from) | |
| 722 | + .ok_or_else(|| anyhow::anyhow!("Exchange rate not found for {:?}", from))?; | |
| 723 | + | |
| 724 | + let to_rate = self.exchange_rates.get(to) | |
| 725 | + .ok_or_else(|| anyhow::anyhow!("Exchange rate not found for {:?}", to))?; | |
| 726 | + | |
| 727 | + Ok(from_rate / to_rate) | |
| 728 | + } | |
| 729 | + | |
| 730 | + /// Get payment history for volunteer | |
| 731 | + pub fn get_payment_history(&self, volunteer_id: &str) -> Vec<&PaymentRecord> { | |
| 732 | + self.payment_history.get(volunteer_id) | |
| 733 | + .map(|records| records.iter().collect()) | |
| 734 | + .unwrap_or_default() | |
| 735 | + } | |
| 736 | + | |
| 737 | + /// Update exchange rates | |
| 738 | + pub fn update_exchange_rates(&mut self, rates: HashMap<Currency, f64>) { | |
| 739 | + for (currency, rate) in rates { | |
| 740 | + self.exchange_rates.insert(currency, rate); | |
| 741 | + } | |
| 742 | + } | |
| 743 | + | |
| 744 | + /// Get supported payment methods for region | |
| 745 | + pub fn get_supported_methods(&self, country_code: &str) -> Vec<&PaymentMethod> { | |
| 746 | + self.payment_methods.iter() | |
| 747 | + .filter(|(_, config)| { | |
| 748 | + config.enabled && | |
| 749 | + (config.geographic_restrictions.is_empty() || | |
| 750 | + config.geographic_restrictions.contains(&country_code.to_string())) | |
| 751 | + }) | |
| 752 | + .map(|(method, _)| method) | |
| 753 | + .collect() | |
| 754 | + } | |
| 755 | +} | |
| 756 | + | |
| 757 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 758 | +struct PaymentFees { | |
| 759 | + pub base_fee: u64, | |
| 760 | + pub method_percentage_fee: u64, | |
| 761 | + pub method_fixed_fee: u64, | |
| 762 | + pub network_fee: u64, | |
| 763 | + pub exchange_fee: u64, | |
| 764 | + pub express_fee: u64, | |
| 765 | + pub total_fee: u64, | |
| 766 | +} | |
| 767 | + | |
| 768 | +#[cfg(test)] | |
| 769 | +mod tests { | |
| 770 | + use super::*; | |
| 771 | + | |
| 772 | + #[test] | |
| 773 | + fn test_payment_processor_creation() { | |
| 774 | + let processor = PaymentProcessor::new(); | |
| 775 | + assert!(!processor.payment_methods.is_empty()); | |
| 776 | + assert!(!processor.exchange_rates.is_empty()); | |
| 777 | + } | |
| 778 | + | |
| 779 | + #[tokio::test] | |
| 780 | + async fn test_payment_submission() { | |
| 781 | + let mut processor = PaymentProcessor::new(); | |
| 782 | + | |
| 783 | + let request = PaymentRequest { | |
| 784 | + request_id: String::new(), | |
| 785 | + volunteer_id: "test_volunteer".to_string(), | |
| 786 | + amount_tokens: 100 * 1_000_000_000_000_000_000, // 100 ZEPH | |
| 787 | + target_currency: Currency::USD, | |
| 788 | + payment_method: PaymentMethod::DigitalWallet(WalletProvider::PayPal), | |
| 789 | + recipient_info: RecipientInfo { | |
| 790 | + wallet_address: None, | |
| 791 | + bank_account: None, | |
| 792 | + digital_wallet: Some(DigitalWalletInfo { | |
| 793 | + provider: WalletProvider::PayPal, | |
| 794 | + wallet_id: "test@example.com".to_string(), | |
| 795 | + verified: true, | |
| 796 | + }), | |
| 797 | + kyc_verified: true, | |
| 798 | + tax_info: None, | |
| 799 | + }, | |
| 800 | + priority: PaymentPriority::Standard, | |
| 801 | + created_at: Utc::now(), | |
| 802 | + scheduled_for: None, | |
| 803 | + metadata: HashMap::new(), | |
| 804 | + }; | |
| 805 | + | |
| 806 | + let request_id = processor.submit_payment_request(request).unwrap(); | |
| 807 | + assert!(!request_id.is_empty()); | |
| 808 | + assert_eq!(processor.pending_payments.len(), 1); | |
| 809 | + } | |
| 810 | + | |
| 811 | + #[test] | |
| 812 | + fn test_currency_conversion() { | |
| 813 | + let processor = PaymentProcessor::new(); | |
| 814 | + let tokens = 100 * 1_000_000_000_000_000_000; // 100 ZEPH | |
| 815 | + | |
| 816 | + let usd_amount = processor.convert_tokens_to_currency(tokens, &Currency::USD).unwrap(); | |
| 817 | + assert_eq!(usd_amount, 1000); // $10.00 (100 ZEPH * $0.10 * 100 cents) | |
| 818 | + | |
| 819 | + let zeph_amount = processor.convert_tokens_to_currency(tokens, &Currency::ZephyrCoin).unwrap(); | |
| 820 | + assert_eq!(zeph_amount, tokens); // Same amount in ZEPH | |
| 821 | + } | |
| 822 | +} | |
src/economics/payout_scheduler.rsadded@@ -0,0 +1,886 @@ | ||
| 1 | +//! Automated Payout Scheduling System | |
| 2 | +//! | |
| 3 | +//! Manages scheduled payouts for ZephyrFS volunteers with configurable intervals | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::{HashMap, BTreeMap}; | |
| 8 | +use chrono::{DateTime, Utc, Duration, Weekday, TimeZone}; | |
| 9 | +use tokio::time::{sleep, Duration as TokioDuration}; | |
| 10 | + | |
| 11 | +use super::payment_processor::{PaymentProcessor, PaymentRequest, Currency, PaymentMethod, RecipientInfo, PaymentPriority}; | |
| 12 | +use super::earnings_calculator::EarningsCalculator; | |
| 13 | + | |
| 14 | +/// Automated payout scheduler | |
| 15 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 16 | +pub struct PayoutScheduler { | |
| 17 | + /// Volunteer payout preferences | |
| 18 | + pub volunteer_preferences: HashMap<String, PayoutPreferences>, | |
| 19 | + /// Scheduled payouts | |
| 20 | + pub scheduled_payouts: BTreeMap<DateTime<Utc>, ScheduledPayout>, | |
| 21 | + /// Payout policies and rules | |
| 22 | + pub policies: PayoutPolicies, | |
| 23 | + /// Accumulated earnings per volunteer | |
| 24 | + pub accumulated_earnings: HashMap<String, AccumulatedEarnings>, | |
| 25 | + /// Schedule configuration | |
| 26 | + pub schedule_config: ScheduleConfig, | |
| 27 | + /// Performance tracking | |
| 28 | + pub payout_history: HashMap<String, Vec<PayoutEvent>>, | |
| 29 | +} | |
| 30 | + | |
| 31 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 32 | +pub struct PayoutPreferences { | |
| 33 | + pub volunteer_id: String, | |
| 34 | + pub frequency: PayoutFrequency, | |
| 35 | + pub preferred_currency: Currency, | |
| 36 | + pub preferred_method: PaymentMethod, | |
| 37 | + pub recipient_info: RecipientInfo, | |
| 38 | + pub minimum_threshold: u64, | |
| 39 | + pub auto_payout_enabled: bool, | |
| 40 | + pub timezone: String, | |
| 41 | + pub preferred_day: Option<Weekday>, | |
| 42 | + pub preferred_hour: u8, // 0-23 | |
| 43 | + pub priority: PaymentPriority, | |
| 44 | + pub split_payments: Option<SplitPaymentConfig>, | |
| 45 | +} | |
| 46 | + | |
| 47 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 48 | +pub enum PayoutFrequency { | |
| 49 | + Daily, | |
| 50 | + Weekly, | |
| 51 | + BiWeekly, | |
| 52 | + Monthly, | |
| 53 | + Quarterly, | |
| 54 | + Manual, // Only manual payouts | |
| 55 | + Threshold(u64), // Pay when threshold reached | |
| 56 | +} | |
| 57 | + | |
| 58 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 59 | +pub struct SplitPaymentConfig { | |
| 60 | + pub enabled: bool, | |
| 61 | + pub primary_percentage: f64, // 0.0-1.0 | |
| 62 | + pub primary_method: PaymentMethod, | |
| 63 | + pub secondary_method: PaymentMethod, | |
| 64 | + pub secondary_currency: Currency, | |
| 65 | +} | |
| 66 | + | |
| 67 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 68 | +pub struct ScheduledPayout { | |
| 69 | + pub payout_id: String, | |
| 70 | + pub volunteer_id: String, | |
| 71 | + pub amount_tokens: u64, | |
| 72 | + pub target_currency: Currency, | |
| 73 | + pub payment_method: PaymentMethod, | |
| 74 | + pub recipient_info: RecipientInfo, | |
| 75 | + pub scheduled_time: DateTime<Utc>, | |
| 76 | + pub created_at: DateTime<Utc>, | |
| 77 | + pub priority: PaymentPriority, | |
| 78 | + pub recurring: bool, | |
| 79 | + pub next_occurrence: Option<DateTime<Utc>>, | |
| 80 | + pub metadata: HashMap<String, String>, | |
| 81 | +} | |
| 82 | + | |
| 83 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 84 | +pub struct PayoutPolicies { | |
| 85 | + /// Minimum time between payouts (hours) | |
| 86 | + pub min_payout_interval_hours: u32, | |
| 87 | + /// Maximum accumulated earnings before forced payout | |
| 88 | + pub max_accumulated_tokens: u64, | |
| 89 | + /// Grace period for failed payments (hours) | |
| 90 | + pub payment_retry_grace_hours: u32, | |
| 91 | + /// Automatic threshold adjustment | |
| 92 | + pub auto_adjust_thresholds: bool, | |
| 93 | + /// Holiday/weekend handling | |
| 94 | + pub holiday_handling: HolidayHandling, | |
| 95 | + /// Risk management | |
| 96 | + pub risk_controls: RiskControls, | |
| 97 | +} | |
| 98 | + | |
| 99 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 100 | +pub struct HolidayHandling { | |
| 101 | + pub skip_weekends: bool, | |
| 102 | + pub skip_holidays: bool, | |
| 103 | + pub advance_before_holiday: bool, | |
| 104 | + pub supported_regions: Vec<String>, | |
| 105 | +} | |
| 106 | + | |
| 107 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 108 | +pub struct RiskControls { | |
| 109 | + pub max_daily_payout_per_volunteer: u64, | |
| 110 | + pub max_total_daily_payouts: u64, | |
| 111 | + pub suspicious_activity_threshold: f64, | |
| 112 | + pub require_additional_verification: bool, | |
| 113 | + pub fraud_detection_enabled: bool, | |
| 114 | +} | |
| 115 | + | |
| 116 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 117 | +pub struct AccumulatedEarnings { | |
| 118 | + pub volunteer_id: String, | |
| 119 | + pub total_tokens: u64, | |
| 120 | + pub last_payout: Option<DateTime<Utc>>, | |
| 121 | + pub accumulation_start: DateTime<Utc>, | |
| 122 | + pub daily_breakdown: HashMap<String, u64>, // date -> earnings | |
| 123 | + pub bonus_tokens: u64, | |
| 124 | + pub pending_taxes: u64, | |
| 125 | +} | |
| 126 | + | |
| 127 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 128 | +pub struct ScheduleConfig { | |
| 129 | + pub processing_interval_minutes: u32, | |
| 130 | + pub lookahead_hours: u32, | |
| 131 | + pub batch_processing: bool, | |
| 132 | + pub max_concurrent_payouts: usize, | |
| 133 | + pub retry_failed_payouts: bool, | |
| 134 | + pub notification_enabled: bool, | |
| 135 | +} | |
| 136 | + | |
| 137 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 138 | +pub struct PayoutEvent { | |
| 139 | + pub event_id: String, | |
| 140 | + pub payout_id: String, | |
| 141 | + pub volunteer_id: String, | |
| 142 | + pub event_type: PayoutEventType, | |
| 143 | + pub amount: u64, | |
| 144 | + pub currency: Currency, | |
| 145 | + pub timestamp: DateTime<Utc>, | |
| 146 | + pub success: bool, | |
| 147 | + pub error_message: Option<String>, | |
| 148 | + pub payment_reference: Option<String>, | |
| 149 | +} | |
| 150 | + | |
| 151 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 152 | +pub enum PayoutEventType { | |
| 153 | + Scheduled, | |
| 154 | + ThresholdReached, | |
| 155 | + Manual, | |
| 156 | + Emergency, | |
| 157 | + Retry, | |
| 158 | +} | |
| 159 | + | |
| 160 | +impl Default for PayoutPolicies { | |
| 161 | + fn default() -> Self { | |
| 162 | + Self { | |
| 163 | + min_payout_interval_hours: 24, | |
| 164 | + max_accumulated_tokens: 1000 * 1_000_000_000_000_000_000, // 1000 ZEPH | |
| 165 | + payment_retry_grace_hours: 72, | |
| 166 | + auto_adjust_thresholds: true, | |
| 167 | + holiday_handling: HolidayHandling { | |
| 168 | + skip_weekends: false, | |
| 169 | + skip_holidays: true, | |
| 170 | + advance_before_holiday: true, | |
| 171 | + supported_regions: vec!["US".to_string(), "EU".to_string()], | |
| 172 | + }, | |
| 173 | + risk_controls: RiskControls { | |
| 174 | + max_daily_payout_per_volunteer: 10000 * 1_000_000_000_000_000_000, // 10k ZEPH | |
| 175 | + max_total_daily_payouts: 100000 * 1_000_000_000_000_000_000, // 100k ZEPH | |
| 176 | + suspicious_activity_threshold: 5.0, // 5x normal activity | |
| 177 | + require_additional_verification: false, | |
| 178 | + fraud_detection_enabled: true, | |
| 179 | + }, | |
| 180 | + } | |
| 181 | + } | |
| 182 | +} | |
| 183 | + | |
| 184 | +impl Default for ScheduleConfig { | |
| 185 | + fn default() -> Self { | |
| 186 | + Self { | |
| 187 | + processing_interval_minutes: 15, | |
| 188 | + lookahead_hours: 24, | |
| 189 | + batch_processing: true, | |
| 190 | + max_concurrent_payouts: 10, | |
| 191 | + retry_failed_payouts: true, | |
| 192 | + notification_enabled: true, | |
| 193 | + } | |
| 194 | + } | |
| 195 | +} | |
| 196 | + | |
| 197 | +impl PayoutScheduler { | |
| 198 | + /// Create new payout scheduler | |
| 199 | + pub fn new() -> Self { | |
| 200 | + Self { | |
| 201 | + volunteer_preferences: HashMap::new(), | |
| 202 | + scheduled_payouts: BTreeMap::new(), | |
| 203 | + policies: PayoutPolicies::default(), | |
| 204 | + accumulated_earnings: HashMap::new(), | |
| 205 | + schedule_config: ScheduleConfig::default(), | |
| 206 | + payout_history: HashMap::new(), | |
| 207 | + } | |
| 208 | + } | |
| 209 | + | |
| 210 | + /// Set volunteer payout preferences | |
| 211 | + pub fn set_volunteer_preferences(&mut self, preferences: PayoutPreferences) { | |
| 212 | + let volunteer_id = preferences.volunteer_id.clone(); | |
| 213 | + self.volunteer_preferences.insert(volunteer_id.clone(), preferences); | |
| 214 | + | |
| 215 | + // Initialize accumulated earnings if needed | |
| 216 | + if !self.accumulated_earnings.contains_key(&volunteer_id) { | |
| 217 | + self.accumulated_earnings.insert(volunteer_id.clone(), AccumulatedEarnings { | |
| 218 | + volunteer_id, | |
| 219 | + total_tokens: 0, | |
| 220 | + last_payout: None, | |
| 221 | + accumulation_start: Utc::now(), | |
| 222 | + daily_breakdown: HashMap::new(), | |
| 223 | + bonus_tokens: 0, | |
| 224 | + pending_taxes: 0, | |
| 225 | + }); | |
| 226 | + } | |
| 227 | + | |
| 228 | + tracing::info!("Updated payout preferences for volunteer: {}", volunteer_id); | |
| 229 | + } | |
| 230 | + | |
| 231 | + /// Add earnings to volunteer's accumulated total | |
| 232 | + pub fn add_earnings(&mut self, volunteer_id: &str, tokens: u64, bonus_tokens: u64) -> Result<()> { | |
| 233 | + let accumulated = self.accumulated_earnings.get_mut(volunteer_id) | |
| 234 | + .ok_or_else(|| anyhow::anyhow!("Volunteer not found in earnings tracker"))?; | |
| 235 | + | |
| 236 | + accumulated.total_tokens += tokens; | |
| 237 | + accumulated.bonus_tokens += bonus_tokens; | |
| 238 | + | |
| 239 | + // Track daily breakdown | |
| 240 | + let today = Utc::now().date_naive().to_string(); | |
| 241 | + *accumulated.daily_breakdown.entry(today).or_insert(0) += tokens; | |
| 242 | + | |
| 243 | + // Check if threshold payout should be triggered | |
| 244 | + if let Some(preferences) = self.volunteer_preferences.get(volunteer_id) { | |
| 245 | + if let PayoutFrequency::Threshold(threshold) = preferences.frequency { | |
| 246 | + if accumulated.total_tokens >= threshold { | |
| 247 | + self.schedule_threshold_payout(volunteer_id)?; | |
| 248 | + } | |
| 249 | + } | |
| 250 | + | |
| 251 | + // Check maximum accumulation policy | |
| 252 | + if accumulated.total_tokens >= self.policies.max_accumulated_tokens { | |
| 253 | + self.schedule_emergency_payout(volunteer_id)?; | |
| 254 | + } | |
| 255 | + } | |
| 256 | + | |
| 257 | + tracing::debug!("Added {} tokens to {}, total: {}", | |
| 258 | + tokens, volunteer_id, accumulated.total_tokens); | |
| 259 | + | |
| 260 | + Ok(()) | |
| 261 | + } | |
| 262 | + | |
| 263 | + /// Schedule threshold-based payout | |
| 264 | + fn schedule_threshold_payout(&mut self, volunteer_id: &str) -> Result<()> { | |
| 265 | + let preferences = self.volunteer_preferences.get(volunteer_id) | |
| 266 | + .ok_or_else(|| anyhow::anyhow!("Volunteer preferences not found"))?; | |
| 267 | + | |
| 268 | + let accumulated = self.accumulated_earnings.get(volunteer_id) | |
| 269 | + .ok_or_else(|| anyhow::anyhow!("Accumulated earnings not found"))?; | |
| 270 | + | |
| 271 | + let payout = ScheduledPayout { | |
| 272 | + payout_id: format!("threshold_{}_{}", volunteer_id, Utc::now().timestamp()), | |
| 273 | + volunteer_id: volunteer_id.to_string(), | |
| 274 | + amount_tokens: accumulated.total_tokens, | |
| 275 | + target_currency: preferences.preferred_currency.clone(), | |
| 276 | + payment_method: preferences.preferred_method.clone(), | |
| 277 | + recipient_info: preferences.recipient_info.clone(), | |
| 278 | + scheduled_time: self.calculate_next_payout_time(preferences)?, | |
| 279 | + created_at: Utc::now(), | |
| 280 | + priority: preferences.priority.clone(), | |
| 281 | + recurring: false, | |
| 282 | + next_occurrence: None, | |
| 283 | + metadata: HashMap::from([ | |
| 284 | + ("trigger".to_string(), "threshold".to_string()), | |
| 285 | + ("threshold".to_string(), preferences.minimum_threshold.to_string()), | |
| 286 | + ]), | |
| 287 | + }; | |
| 288 | + | |
| 289 | + self.scheduled_payouts.insert(payout.scheduled_time, payout); | |
| 290 | + | |
| 291 | + tracing::info!("Scheduled threshold payout for {}: {} tokens", | |
| 292 | + volunteer_id, accumulated.total_tokens); | |
| 293 | + | |
| 294 | + Ok(()) | |
| 295 | + } | |
| 296 | + | |
| 297 | + /// Schedule emergency payout for max accumulation | |
| 298 | + fn schedule_emergency_payout(&mut self, volunteer_id: &str) -> Result<()> { | |
| 299 | + let preferences = self.volunteer_preferences.get(volunteer_id) | |
| 300 | + .ok_or_else(|| anyhow::anyhow!("Volunteer preferences not found"))?; | |
| 301 | + | |
| 302 | + let accumulated = self.accumulated_earnings.get(volunteer_id) | |
| 303 | + .ok_or_else(|| anyhow::anyhow!("Accumulated earnings not found"))?; | |
| 304 | + | |
| 305 | + let payout = ScheduledPayout { | |
| 306 | + payout_id: format!("emergency_{}_{}", volunteer_id, Utc::now().timestamp()), | |
| 307 | + volunteer_id: volunteer_id.to_string(), | |
| 308 | + amount_tokens: accumulated.total_tokens, | |
| 309 | + target_currency: preferences.preferred_currency.clone(), | |
| 310 | + payment_method: preferences.preferred_method.clone(), | |
| 311 | + recipient_info: preferences.recipient_info.clone(), | |
| 312 | + scheduled_time: Utc::now() + Duration::hours(1), // Emergency: 1 hour delay | |
| 313 | + created_at: Utc::now(), | |
| 314 | + priority: PaymentPriority::Immediate, | |
| 315 | + recurring: false, | |
| 316 | + next_occurrence: None, | |
| 317 | + metadata: HashMap::from([ | |
| 318 | + ("trigger".to_string(), "emergency".to_string()), | |
| 319 | + ("reason".to_string(), "max_accumulation".to_string()), | |
| 320 | + ]), | |
| 321 | + }; | |
| 322 | + | |
| 323 | + self.scheduled_payouts.insert(payout.scheduled_time, payout); | |
| 324 | + | |
| 325 | + tracing::warn!("Scheduled emergency payout for {}: {} tokens (max accumulation)", | |
| 326 | + volunteer_id, accumulated.total_tokens); | |
| 327 | + | |
| 328 | + Ok(()) | |
| 329 | + } | |
| 330 | + | |
| 331 | + /// Calculate next payout time based on preferences | |
| 332 | + fn calculate_next_payout_time(&self, preferences: &PayoutPreferences) -> Result<DateTime<Utc>> { | |
| 333 | + let now = Utc::now(); | |
| 334 | + let base_time = match preferences.frequency { | |
| 335 | + PayoutFrequency::Daily => now + Duration::days(1), | |
| 336 | + PayoutFrequency::Weekly => now + Duration::weeks(1), | |
| 337 | + PayoutFrequency::BiWeekly => now + Duration::weeks(2), | |
| 338 | + PayoutFrequency::Monthly => now + Duration::days(30), | |
| 339 | + PayoutFrequency::Quarterly => now + Duration::days(90), | |
| 340 | + PayoutFrequency::Threshold(_) => now + Duration::hours(1), // Immediate | |
| 341 | + PayoutFrequency::Manual => return Err(anyhow::anyhow!("Manual payouts don't have scheduled times")), | |
| 342 | + }; | |
| 343 | + | |
| 344 | + // Adjust for preferred day and hour | |
| 345 | + let mut adjusted_time = base_time; | |
| 346 | + | |
| 347 | + // Set preferred hour | |
| 348 | + let target_hour = preferences.preferred_hour; | |
| 349 | + adjusted_time = adjusted_time | |
| 350 | + .with_hour(target_hour) | |
| 351 | + .and_then(|dt| dt.with_minute(0)) | |
| 352 | + .and_then(|dt| dt.with_second(0)) | |
| 353 | + .ok_or_else(|| anyhow::anyhow!("Invalid time adjustment"))?; | |
| 354 | + | |
| 355 | + // Adjust for preferred day of week (for weekly/biweekly) | |
| 356 | + if let Some(preferred_day) = preferences.preferred_day { | |
| 357 | + if matches!(preferences.frequency, PayoutFrequency::Weekly | PayoutFrequency::BiWeekly) { | |
| 358 | + let current_weekday = adjusted_time.weekday(); | |
| 359 | + let days_until_preferred = (preferred_day.number_from_monday() as i64 | |
| 360 | + - current_weekday.number_from_monday() as i64 + 7) % 7; | |
| 361 | + | |
| 362 | + if days_until_preferred > 0 { | |
| 363 | + adjusted_time += Duration::days(days_until_preferred); | |
| 364 | + } | |
| 365 | + } | |
| 366 | + } | |
| 367 | + | |
| 368 | + // Handle holidays and weekends | |
| 369 | + adjusted_time = self.adjust_for_holidays(adjusted_time); | |
| 370 | + | |
| 371 | + Ok(adjusted_time) | |
| 372 | + } | |
| 373 | + | |
| 374 | + /// Adjust payout time for holidays and weekends | |
| 375 | + fn adjust_for_holidays(&self, mut payout_time: DateTime<Utc>) -> DateTime<Utc> { | |
| 376 | + let holiday_config = &self.policies.holiday_handling; | |
| 377 | + | |
| 378 | + // Skip weekends if configured | |
| 379 | + if holiday_config.skip_weekends { | |
| 380 | + let weekday = payout_time.weekday(); | |
| 381 | + if weekday == Weekday::Sat { | |
| 382 | + payout_time += Duration::days(2); // Move to Monday | |
| 383 | + } else if weekday == Weekday::Sun { | |
| 384 | + payout_time += Duration::days(1); // Move to Monday | |
| 385 | + } | |
| 386 | + } | |
| 387 | + | |
| 388 | + // Skip holidays (simplified - would use real holiday calendar in production) | |
| 389 | + if holiday_config.skip_holidays { | |
| 390 | + // Example: Skip December 25th | |
| 391 | + if payout_time.month() == 12 && payout_time.day() == 25 { | |
| 392 | + payout_time += Duration::days(1); | |
| 393 | + } | |
| 394 | + } | |
| 395 | + | |
| 396 | + payout_time | |
| 397 | + } | |
| 398 | + | |
| 399 | + /// Generate recurring payouts for all volunteers | |
| 400 | + pub fn generate_recurring_payouts(&mut self) -> Result<usize> { | |
| 401 | + let mut created_count = 0; | |
| 402 | + let now = Utc::now(); | |
| 403 | + let lookahead = now + Duration::hours(self.schedule_config.lookahead_hours as i64); | |
| 404 | + | |
| 405 | + for (volunteer_id, preferences) in &self.volunteer_preferences.clone() { | |
| 406 | + if !preferences.auto_payout_enabled { | |
| 407 | + continue; | |
| 408 | + } | |
| 409 | + | |
| 410 | + let accumulated = self.accumulated_earnings.get(volunteer_id) | |
| 411 | + .ok_or_else(|| anyhow::anyhow!("Accumulated earnings not found for {}", volunteer_id))?; | |
| 412 | + | |
| 413 | + // Check if volunteer has earnings to pay out | |
| 414 | + if accumulated.total_tokens < preferences.minimum_threshold { | |
| 415 | + continue; | |
| 416 | + } | |
| 417 | + | |
| 418 | + // Check if last payout was recent enough | |
| 419 | + if let Some(last_payout) = accumulated.last_payout { | |
| 420 | + let hours_since_last = (now - last_payout).num_hours(); | |
| 421 | + if hours_since_last < self.policies.min_payout_interval_hours as i64 { | |
| 422 | + continue; | |
| 423 | + } | |
| 424 | + } | |
| 425 | + | |
| 426 | + // Calculate next payout time | |
| 427 | + let next_payout_time = self.calculate_next_payout_time(preferences)?; | |
| 428 | + | |
| 429 | + // Only schedule if within lookahead window | |
| 430 | + if next_payout_time <= lookahead { | |
| 431 | + // Check if already scheduled | |
| 432 | + let already_scheduled = self.scheduled_payouts.values() | |
| 433 | + .any(|payout| payout.volunteer_id == *volunteer_id && !payout.recurring); | |
| 434 | + | |
| 435 | + if !already_scheduled { | |
| 436 | + let payout = self.create_scheduled_payout(volunteer_id, preferences, accumulated)?; | |
| 437 | + self.scheduled_payouts.insert(payout.scheduled_time, payout); | |
| 438 | + created_count += 1; | |
| 439 | + } | |
| 440 | + } | |
| 441 | + } | |
| 442 | + | |
| 443 | + if created_count > 0 { | |
| 444 | + tracing::info!("Generated {} recurring payouts", created_count); | |
| 445 | + } | |
| 446 | + | |
| 447 | + Ok(created_count) | |
| 448 | + } | |
| 449 | + | |
| 450 | + /// Create scheduled payout from preferences and earnings | |
| 451 | + fn create_scheduled_payout( | |
| 452 | + &self, | |
| 453 | + volunteer_id: &str, | |
| 454 | + preferences: &PayoutPreferences, | |
| 455 | + accumulated: &AccumulatedEarnings, | |
| 456 | + ) -> Result<ScheduledPayout> { | |
| 457 | + let payout_time = self.calculate_next_payout_time(preferences)?; | |
| 458 | + | |
| 459 | + // Handle split payments | |
| 460 | + let (amount, method, currency) = if let Some(split_config) = &preferences.split_payments { | |
| 461 | + if split_config.enabled { | |
| 462 | + // For now, use primary payment - secondary would be handled separately | |
| 463 | + let primary_amount = (accumulated.total_tokens as f64 * split_config.primary_percentage) as u64; | |
| 464 | + (primary_amount, split_config.primary_method.clone(), preferences.preferred_currency.clone()) | |
| 465 | + } else { | |
| 466 | + (accumulated.total_tokens, preferences.preferred_method.clone(), preferences.preferred_currency.clone()) | |
| 467 | + } | |
| 468 | + } else { | |
| 469 | + (accumulated.total_tokens, preferences.preferred_method.clone(), preferences.preferred_currency.clone()) | |
| 470 | + }; | |
| 471 | + | |
| 472 | + Ok(ScheduledPayout { | |
| 473 | + payout_id: format!("sched_{}_{}", volunteer_id, payout_time.timestamp()), | |
| 474 | + volunteer_id: volunteer_id.to_string(), | |
| 475 | + amount_tokens: amount, | |
| 476 | + target_currency: currency, | |
| 477 | + payment_method: method, | |
| 478 | + recipient_info: preferences.recipient_info.clone(), | |
| 479 | + scheduled_time: payout_time, | |
| 480 | + created_at: Utc::now(), | |
| 481 | + priority: preferences.priority.clone(), | |
| 482 | + recurring: true, | |
| 483 | + next_occurrence: Some(self.calculate_next_recurring_time(preferences, payout_time)?), | |
| 484 | + metadata: HashMap::from([ | |
| 485 | + ("trigger".to_string(), "recurring".to_string()), | |
| 486 | + ("frequency".to_string(), format!("{:?}", preferences.frequency)), | |
| 487 | + ]), | |
| 488 | + }) | |
| 489 | + } | |
| 490 | + | |
| 491 | + /// Calculate next occurrence for recurring payout | |
| 492 | + fn calculate_next_recurring_time( | |
| 493 | + &self, | |
| 494 | + preferences: &PayoutPreferences, | |
| 495 | + current_time: DateTime<Utc>, | |
| 496 | + ) -> Result<DateTime<Utc>> { | |
| 497 | + let next_time = match preferences.frequency { | |
| 498 | + PayoutFrequency::Daily => current_time + Duration::days(1), | |
| 499 | + PayoutFrequency::Weekly => current_time + Duration::weeks(1), | |
| 500 | + PayoutFrequency::BiWeekly => current_time + Duration::weeks(2), | |
| 501 | + PayoutFrequency::Monthly => current_time + Duration::days(30), | |
| 502 | + PayoutFrequency::Quarterly => current_time + Duration::days(90), | |
| 503 | + _ => return Err(anyhow::anyhow!("Frequency doesn't support recurring")), | |
| 504 | + }; | |
| 505 | + | |
| 506 | + Ok(self.adjust_for_holidays(next_time)) | |
| 507 | + } | |
| 508 | + | |
| 509 | + /// Process due payouts | |
| 510 | + pub async fn process_due_payouts( | |
| 511 | + &mut self, | |
| 512 | + payment_processor: &mut PaymentProcessor, | |
| 513 | + ) -> Result<Vec<PayoutEvent>> { | |
| 514 | + let now = Utc::now(); | |
| 515 | + let mut events = Vec::new(); | |
| 516 | + | |
| 517 | + // Collect due payouts | |
| 518 | + let due_payouts: Vec<_> = self.scheduled_payouts | |
| 519 | + .range(..=now) | |
| 520 | + .map(|(_, payout)| payout.clone()) | |
| 521 | + .collect(); | |
| 522 | + | |
| 523 | + for payout in due_payouts { | |
| 524 | + // Remove from scheduled | |
| 525 | + self.scheduled_payouts.remove(&payout.scheduled_time); | |
| 526 | + | |
| 527 | + // Process payout | |
| 528 | + let event = self.process_single_payout(payout, payment_processor).await?; | |
| 529 | + events.push(event.clone()); | |
| 530 | + | |
| 531 | + // Record in history | |
| 532 | + self.payout_history | |
| 533 | + .entry(event.volunteer_id.clone()) | |
| 534 | + .or_insert_with(Vec::new) | |
| 535 | + .push(event); | |
| 536 | + } | |
| 537 | + | |
| 538 | + Ok(events) | |
| 539 | + } | |
| 540 | + | |
| 541 | + /// Process single payout | |
| 542 | + async fn process_single_payout( | |
| 543 | + &mut self, | |
| 544 | + payout: ScheduledPayout, | |
| 545 | + payment_processor: &mut PaymentProcessor, | |
| 546 | + ) -> Result<PayoutEvent> { | |
| 547 | + let event_id = format!("event_{}_{}", payout.volunteer_id, Utc::now().timestamp_millis()); | |
| 548 | + | |
| 549 | + // Apply risk controls | |
| 550 | + if let Err(risk_error) = self.check_risk_controls(&payout) { | |
| 551 | + let event = PayoutEvent { | |
| 552 | + event_id, | |
| 553 | + payout_id: payout.payout_id, | |
| 554 | + volunteer_id: payout.volunteer_id, | |
| 555 | + event_type: PayoutEventType::Scheduled, | |
| 556 | + amount: payout.amount_tokens, | |
| 557 | + currency: payout.target_currency, | |
| 558 | + timestamp: Utc::now(), | |
| 559 | + success: false, | |
| 560 | + error_message: Some(risk_error.to_string()), | |
| 561 | + payment_reference: None, | |
| 562 | + }; | |
| 563 | + | |
| 564 | + tracing::warn!("Payout blocked by risk controls: {}", risk_error); | |
| 565 | + return Ok(event); | |
| 566 | + } | |
| 567 | + | |
| 568 | + // Create payment request | |
| 569 | + let payment_request = PaymentRequest { | |
| 570 | + request_id: payout.payout_id.clone(), | |
| 571 | + volunteer_id: payout.volunteer_id.clone(), | |
| 572 | + amount_tokens: payout.amount_tokens, | |
| 573 | + target_currency: payout.target_currency.clone(), | |
| 574 | + payment_method: payout.payment_method.clone(), | |
| 575 | + recipient_info: payout.recipient_info.clone(), | |
| 576 | + priority: payout.priority.clone(), | |
| 577 | + created_at: Utc::now(), | |
| 578 | + scheduled_for: None, | |
| 579 | + metadata: payout.metadata.clone(), | |
| 580 | + }; | |
| 581 | + | |
| 582 | + // Submit to payment processor | |
| 583 | + match payment_processor.submit_payment_request(payment_request) { | |
| 584 | + Ok(payment_reference) => { | |
| 585 | + // Update accumulated earnings | |
| 586 | + if let Some(accumulated) = self.accumulated_earnings.get_mut(&payout.volunteer_id) { | |
| 587 | + accumulated.total_tokens = accumulated.total_tokens.saturating_sub(payout.amount_tokens); | |
| 588 | + accumulated.last_payout = Some(Utc::now()); | |
| 589 | + } | |
| 590 | + | |
| 591 | + // Schedule next occurrence if recurring | |
| 592 | + if payout.recurring { | |
| 593 | + if let Some(next_time) = payout.next_occurrence { | |
| 594 | + let mut next_payout = payout.clone(); | |
| 595 | + next_payout.payout_id = format!("sched_{}_{}", payout.volunteer_id, next_time.timestamp()); | |
| 596 | + next_payout.scheduled_time = next_time; | |
| 597 | + next_payout.created_at = Utc::now(); | |
| 598 | + | |
| 599 | + // Calculate next occurrence after this one | |
| 600 | + if let Some(preferences) = self.volunteer_preferences.get(&payout.volunteer_id) { | |
| 601 | + next_payout.next_occurrence = self.calculate_next_recurring_time(preferences, next_time).ok(); | |
| 602 | + } | |
| 603 | + | |
| 604 | + self.scheduled_payouts.insert(next_time, next_payout); | |
| 605 | + } | |
| 606 | + } | |
| 607 | + | |
| 608 | + let event = PayoutEvent { | |
| 609 | + event_id, | |
| 610 | + payout_id: payout.payout_id, | |
| 611 | + volunteer_id: payout.volunteer_id, | |
| 612 | + event_type: PayoutEventType::Scheduled, | |
| 613 | + amount: payout.amount_tokens, | |
| 614 | + currency: payout.target_currency, | |
| 615 | + timestamp: Utc::now(), | |
| 616 | + success: true, | |
| 617 | + error_message: None, | |
| 618 | + payment_reference: Some(payment_reference), | |
| 619 | + }; | |
| 620 | + | |
| 621 | + tracing::info!("Payout processed successfully: {} tokens to {}", | |
| 622 | + payout.amount_tokens, payout.volunteer_id); | |
| 623 | + | |
| 624 | + Ok(event) | |
| 625 | + }, | |
| 626 | + Err(e) => { | |
| 627 | + let event = PayoutEvent { | |
| 628 | + event_id, | |
| 629 | + payout_id: payout.payout_id, | |
| 630 | + volunteer_id: payout.volunteer_id, | |
| 631 | + event_type: PayoutEventType::Scheduled, | |
| 632 | + amount: payout.amount_tokens, | |
| 633 | + currency: payout.target_currency, | |
| 634 | + timestamp: Utc::now(), | |
| 635 | + success: false, | |
| 636 | + error_message: Some(e.to_string()), | |
| 637 | + payment_reference: None, | |
| 638 | + }; | |
| 639 | + | |
| 640 | + tracing::error!("Payout failed: {}", e); | |
| 641 | + | |
| 642 | + // Schedule retry if configured | |
| 643 | + if self.schedule_config.retry_failed_payouts { | |
| 644 | + self.schedule_payout_retry(payout)?; | |
| 645 | + } | |
| 646 | + | |
| 647 | + Ok(event) | |
| 648 | + } | |
| 649 | + } | |
| 650 | + } | |
| 651 | + | |
| 652 | + /// Check risk controls for payout | |
| 653 | + fn check_risk_controls(&self, payout: &ScheduledPayout) -> Result<()> { | |
| 654 | + let risk_controls = &self.policies.risk_controls; | |
| 655 | + | |
| 656 | + // Check daily limit per volunteer | |
| 657 | + if payout.amount_tokens > risk_controls.max_daily_payout_per_volunteer { | |
| 658 | + return Err(anyhow::anyhow!("Exceeds daily payout limit per volunteer")); | |
| 659 | + } | |
| 660 | + | |
| 661 | + // Check total daily payouts | |
| 662 | + let today = Utc::now().date_naive(); | |
| 663 | + let today_payouts: u64 = self.payout_history | |
| 664 | + .values() | |
| 665 | + .flatten() | |
| 666 | + .filter(|event| event.timestamp.date_naive() == today && event.success) | |
| 667 | + .map(|event| event.amount) | |
| 668 | + .sum(); | |
| 669 | + | |
| 670 | + if today_payouts + payout.amount_tokens > risk_controls.max_total_daily_payouts { | |
| 671 | + return Err(anyhow::anyhow!("Exceeds total daily payout limit")); | |
| 672 | + } | |
| 673 | + | |
| 674 | + // Check for suspicious activity | |
| 675 | + if risk_controls.fraud_detection_enabled { | |
| 676 | + let volunteer_history = self.payout_history.get(&payout.volunteer_id); | |
| 677 | + if let Some(history) = volunteer_history { | |
| 678 | + if history.len() > 1 { | |
| 679 | + let recent_average = history.iter() | |
| 680 | + .rev() | |
| 681 | + .take(10) | |
| 682 | + .map(|e| e.amount) | |
| 683 | + .sum::<u64>() / 10.min(history.len()) as u64; | |
| 684 | + | |
| 685 | + let activity_ratio = payout.amount_tokens as f64 / recent_average as f64; | |
| 686 | + if activity_ratio > risk_controls.suspicious_activity_threshold { | |
| 687 | + return Err(anyhow::anyhow!("Suspicious activity detected: {}x normal amount", activity_ratio)); | |
| 688 | + } | |
| 689 | + } | |
| 690 | + } | |
| 691 | + } | |
| 692 | + | |
| 693 | + Ok(()) | |
| 694 | + } | |
| 695 | + | |
| 696 | + /// Schedule retry for failed payout | |
| 697 | + fn schedule_payout_retry(&mut self, mut payout: ScheduledPayout) -> Result<()> { | |
| 698 | + let retry_time = Utc::now() + Duration::hours(self.policies.payment_retry_grace_hours as i64); | |
| 699 | + | |
| 700 | + payout.payout_id = format!("retry_{}_{}", payout.volunteer_id, retry_time.timestamp()); | |
| 701 | + payout.scheduled_time = retry_time; | |
| 702 | + payout.priority = PaymentPriority::Express; // Higher priority for retries | |
| 703 | + payout.metadata.insert("retry".to_string(), "true".to_string()); | |
| 704 | + | |
| 705 | + self.scheduled_payouts.insert(retry_time, payout); | |
| 706 | + | |
| 707 | + Ok(()) | |
| 708 | + } | |
| 709 | + | |
| 710 | + /// Run automated payout processing loop | |
| 711 | + pub async fn run_automated_processing( | |
| 712 | + &mut self, | |
| 713 | + mut payment_processor: PaymentProcessor, | |
| 714 | + mut earnings_calculator: EarningsCalculator, | |
| 715 | + ) -> Result<()> { | |
| 716 | + let mut interval = tokio::time::interval( | |
| 717 | + TokioDuration::from_secs(self.schedule_config.processing_interval_minutes as u64 * 60) | |
| 718 | + ); | |
| 719 | + | |
| 720 | + loop { | |
| 721 | + interval.tick().await; | |
| 722 | + | |
| 723 | + // Generate recurring payouts | |
| 724 | + if let Err(e) = self.generate_recurring_payouts() { | |
| 725 | + tracing::error!("Failed to generate recurring payouts: {}", e); | |
| 726 | + } | |
| 727 | + | |
| 728 | + // Process due payouts | |
| 729 | + match self.process_due_payouts(&mut payment_processor).await { | |
| 730 | + Ok(events) => { | |
| 731 | + if !events.is_empty() { | |
| 732 | + tracing::info!("Processed {} payouts", events.len()); | |
| 733 | + } | |
| 734 | + }, | |
| 735 | + Err(e) => { | |
| 736 | + tracing::error!("Failed to process payouts: {}", e); | |
| 737 | + } | |
| 738 | + } | |
| 739 | + | |
| 740 | + // Update accumulated earnings from calculator | |
| 741 | + // This would be integrated with the earnings calculator in a real implementation | |
| 742 | + | |
| 743 | + tracing::debug!("Payout processing cycle complete"); | |
| 744 | + } | |
| 745 | + } | |
| 746 | + | |
| 747 | + /// Get payout history for volunteer | |
| 748 | + pub fn get_payout_history(&self, volunteer_id: &str) -> Vec<&PayoutEvent> { | |
| 749 | + self.payout_history.get(volunteer_id) | |
| 750 | + .map(|events| events.iter().collect()) | |
| 751 | + .unwrap_or_default() | |
| 752 | + } | |
| 753 | + | |
| 754 | + /// Get upcoming payouts for volunteer | |
| 755 | + pub fn get_upcoming_payouts(&self, volunteer_id: &str) -> Vec<&ScheduledPayout> { | |
| 756 | + self.scheduled_payouts.values() | |
| 757 | + .filter(|payout| payout.volunteer_id == volunteer_id) | |
| 758 | + .collect() | |
| 759 | + } | |
| 760 | + | |
| 761 | + /// Manual payout trigger | |
| 762 | + pub async fn trigger_manual_payout( | |
| 763 | + &mut self, | |
| 764 | + volunteer_id: &str, | |
| 765 | + payment_processor: &mut PaymentProcessor, | |
| 766 | + ) -> Result<PayoutEvent> { | |
| 767 | + let preferences = self.volunteer_preferences.get(volunteer_id) | |
| 768 | + .ok_or_else(|| anyhow::anyhow!("Volunteer preferences not found"))?; | |
| 769 | + | |
| 770 | + let accumulated = self.accumulated_earnings.get(volunteer_id) | |
| 771 | + .ok_or_else(|| anyhow::anyhow!("Accumulated earnings not found"))?; | |
| 772 | + | |
| 773 | + if accumulated.total_tokens < preferences.minimum_threshold { | |
| 774 | + return Err(anyhow::anyhow!("Below minimum payout threshold")); | |
| 775 | + } | |
| 776 | + | |
| 777 | + let payout = ScheduledPayout { | |
| 778 | + payout_id: format!("manual_{}_{}", volunteer_id, Utc::now().timestamp()), | |
| 779 | + volunteer_id: volunteer_id.to_string(), | |
| 780 | + amount_tokens: accumulated.total_tokens, | |
| 781 | + target_currency: preferences.preferred_currency.clone(), | |
| 782 | + payment_method: preferences.preferred_method.clone(), | |
| 783 | + recipient_info: preferences.recipient_info.clone(), | |
| 784 | + scheduled_time: Utc::now(), | |
| 785 | + created_at: Utc::now(), | |
| 786 | + priority: PaymentPriority::Express, | |
| 787 | + recurring: false, | |
| 788 | + next_occurrence: None, | |
| 789 | + metadata: HashMap::from([ | |
| 790 | + ("trigger".to_string(), "manual".to_string()), | |
| 791 | + ]), | |
| 792 | + }; | |
| 793 | + | |
| 794 | + self.process_single_payout(payout, payment_processor).await | |
| 795 | + } | |
| 796 | +} | |
| 797 | + | |
| 798 | +#[cfg(test)] | |
| 799 | +mod tests { | |
| 800 | + use super::*; | |
| 801 | + use crate::economics::payment_processor::{PaymentMethod, WalletProvider, RecipientInfo, DigitalWalletInfo}; | |
| 802 | + | |
| 803 | + #[test] | |
| 804 | + fn test_payout_scheduler_creation() { | |
| 805 | + let scheduler = PayoutScheduler::new(); | |
| 806 | + assert!(scheduler.volunteer_preferences.is_empty()); | |
| 807 | + assert!(scheduler.scheduled_payouts.is_empty()); | |
| 808 | + } | |
| 809 | + | |
| 810 | + #[test] | |
| 811 | + fn test_earnings_accumulation() { | |
| 812 | + let mut scheduler = PayoutScheduler::new(); | |
| 813 | + | |
| 814 | + // Set up volunteer preferences | |
| 815 | + let preferences = PayoutPreferences { | |
| 816 | + volunteer_id: "test_volunteer".to_string(), | |
| 817 | + frequency: PayoutFrequency::Weekly, | |
| 818 | + preferred_currency: Currency::USD, | |
| 819 | + preferred_method: PaymentMethod::DigitalWallet(WalletProvider::PayPal), | |
| 820 | + recipient_info: RecipientInfo { | |
| 821 | + wallet_address: None, | |
| 822 | + bank_account: None, | |
| 823 | + digital_wallet: Some(DigitalWalletInfo { | |
| 824 | + provider: WalletProvider::PayPal, | |
| 825 | + wallet_id: "test@example.com".to_string(), | |
| 826 | + verified: true, | |
| 827 | + }), | |
| 828 | + kyc_verified: true, | |
| 829 | + tax_info: None, | |
| 830 | + }, | |
| 831 | + minimum_threshold: 10 * 1_000_000_000_000_000_000, // 10 ZEPH | |
| 832 | + auto_payout_enabled: true, | |
| 833 | + timezone: "UTC".to_string(), | |
| 834 | + preferred_day: Some(Weekday::Fri), | |
| 835 | + preferred_hour: 14, | |
| 836 | + priority: PaymentPriority::Standard, | |
| 837 | + split_payments: None, | |
| 838 | + }; | |
| 839 | + | |
| 840 | + scheduler.set_volunteer_preferences(preferences); | |
| 841 | + | |
| 842 | + // Add earnings | |
| 843 | + scheduler.add_earnings("test_volunteer", 5 * 1_000_000_000_000_000_000, 0).unwrap(); | |
| 844 | + | |
| 845 | + let accumulated = scheduler.accumulated_earnings.get("test_volunteer").unwrap(); | |
| 846 | + assert_eq!(accumulated.total_tokens, 5 * 1_000_000_000_000_000_000); | |
| 847 | + } | |
| 848 | + | |
| 849 | + #[test] | |
| 850 | + fn test_threshold_payout_trigger() { | |
| 851 | + let mut scheduler = PayoutScheduler::new(); | |
| 852 | + | |
| 853 | + let preferences = PayoutPreferences { | |
| 854 | + volunteer_id: "test_volunteer".to_string(), | |
| 855 | + frequency: PayoutFrequency::Threshold(10 * 1_000_000_000_000_000_000), // 10 ZEPH threshold | |
| 856 | + preferred_currency: Currency::ZephyrCoin, | |
| 857 | + preferred_method: PaymentMethod::DigitalWallet(WalletProvider::PayPal), | |
| 858 | + recipient_info: RecipientInfo { | |
| 859 | + wallet_address: None, | |
| 860 | + bank_account: None, | |
| 861 | + digital_wallet: Some(DigitalWalletInfo { | |
| 862 | + provider: WalletProvider::PayPal, | |
| 863 | + wallet_id: "test@example.com".to_string(), | |
| 864 | + verified: true, | |
| 865 | + }), | |
| 866 | + kyc_verified: true, | |
| 867 | + tax_info: None, | |
| 868 | + }, | |
| 869 | + minimum_threshold: 10 * 1_000_000_000_000_000_000, | |
| 870 | + auto_payout_enabled: true, | |
| 871 | + timezone: "UTC".to_string(), | |
| 872 | + preferred_day: None, | |
| 873 | + preferred_hour: 12, | |
| 874 | + priority: PaymentPriority::Standard, | |
| 875 | + split_payments: None, | |
| 876 | + }; | |
| 877 | + | |
| 878 | + scheduler.set_volunteer_preferences(preferences); | |
| 879 | + | |
| 880 | + // Add earnings that exceed threshold | |
| 881 | + scheduler.add_earnings("test_volunteer", 15 * 1_000_000_000_000_000_000, 0).unwrap(); | |
| 882 | + | |
| 883 | + // Should have scheduled a payout | |
| 884 | + assert!(!scheduler.scheduled_payouts.is_empty()); | |
| 885 | + } | |
| 886 | +} | |
src/economics/performance_rewards.rsadded@@ -0,0 +1,1034 @@ | ||
| 1 | +//! Performance-Based Rewards System | |
| 2 | +//! | |
| 3 | +//! Advanced reward system that incentivizes excellence and network contribution | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::{HashMap, VecDeque}; | |
| 8 | +use chrono::{DateTime, Utc, Duration}; | |
| 9 | + | |
| 10 | +use super::token_model::{RewardReason, NetworkHealthMetrics}; | |
| 11 | +use super::earnings_calculator::{VolunteerMetrics, GeographicRegion, ConnectionQuality}; | |
| 12 | + | |
| 13 | +/// Performance-based rewards manager | |
| 14 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 15 | +pub struct PerformanceRewardsSystem { | |
| 16 | + /// Performance scoring configuration | |
| 17 | + pub scoring_config: ScoringConfiguration, | |
| 18 | + /// Reward tier definitions | |
| 19 | + pub reward_tiers: Vec<RewardTier>, | |
| 20 | + /// Achievement system | |
| 21 | + pub achievements: HashMap<String, Achievement>, | |
| 22 | + /// Volunteer performance tracking | |
| 23 | + pub volunteer_scores: HashMap<String, PerformanceScore>, | |
| 24 | + /// Leaderboards and competitions | |
| 25 | + pub leaderboards: HashMap<String, Leaderboard>, | |
| 26 | + /// Special events and challenges | |
| 27 | + pub active_challenges: HashMap<String, Challenge>, | |
| 28 | + /// Reward multipliers and boosts | |
| 29 | + pub active_multipliers: HashMap<String, RewardMultiplier>, | |
| 30 | +} | |
| 31 | + | |
| 32 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 33 | +pub struct ScoringConfiguration { | |
| 34 | + /// Weight factors for different metrics | |
| 35 | + pub uptime_weight: f64, | |
| 36 | + pub speed_weight: f64, | |
| 37 | + pub reliability_weight: f64, | |
| 38 | + pub longevity_weight: f64, | |
| 39 | + pub contribution_weight: f64, | |
| 40 | + pub diversity_weight: f64, | |
| 41 | + | |
| 42 | + /// Performance windows | |
| 43 | + pub daily_window_hours: u32, | |
| 44 | + pub weekly_window_days: u32, | |
| 45 | + pub monthly_window_days: u32, | |
| 46 | + | |
| 47 | + /// Scoring thresholds | |
| 48 | + pub excellent_threshold: f64, | |
| 49 | + pub good_threshold: f64, | |
| 50 | + pub average_threshold: f64, | |
| 51 | +} | |
| 52 | + | |
| 53 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 54 | +pub struct RewardTier { | |
| 55 | + pub tier_name: String, | |
| 56 | + pub min_score: f64, | |
| 57 | + pub max_score: f64, | |
| 58 | + pub multiplier: f64, | |
| 59 | + pub badge_icon: String, | |
| 60 | + pub color: String, | |
| 61 | + pub benefits: Vec<TierBenefit>, | |
| 62 | +} | |
| 63 | + | |
| 64 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 65 | +pub struct TierBenefit { | |
| 66 | + pub benefit_type: BenefitType, | |
| 67 | + pub value: f64, | |
| 68 | + pub description: String, | |
| 69 | +} | |
| 70 | + | |
| 71 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 72 | +pub enum BenefitType { | |
| 73 | + EarningsMultiplier, | |
| 74 | + PrioritySupport, | |
| 75 | + EarlyAccess, | |
| 76 | + ReducedFees, | |
| 77 | + BonusPayouts, | |
| 78 | + ExclusiveFeatures, | |
| 79 | +} | |
| 80 | + | |
| 81 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 82 | +pub struct Achievement { | |
| 83 | + pub achievement_id: String, | |
| 84 | + pub name: String, | |
| 85 | + pub description: String, | |
| 86 | + pub icon: String, | |
| 87 | + pub rarity: AchievementRarity, | |
| 88 | + pub requirements: AchievementRequirements, | |
| 89 | + pub reward_tokens: u64, | |
| 90 | + pub reward_multiplier: f64, | |
| 91 | + pub one_time: bool, | |
| 92 | + pub unlocked_by: Vec<String>, // volunteer IDs | |
| 93 | +} | |
| 94 | + | |
| 95 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 96 | +pub enum AchievementRarity { | |
| 97 | + Common, | |
| 98 | + Uncommon, | |
| 99 | + Rare, | |
| 100 | + Epic, | |
| 101 | + Legendary, | |
| 102 | +} | |
| 103 | + | |
| 104 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 105 | +pub struct AchievementRequirements { | |
| 106 | + pub min_uptime_percentage: Option<f64>, | |
| 107 | + pub min_storage_gb: Option<u64>, | |
| 108 | + pub min_speed_mbps: Option<f64>, | |
| 109 | + pub min_reliability_score: Option<f64>, | |
| 110 | + pub min_days_active: Option<u32>, | |
| 111 | + pub geographic_requirements: Option<Vec<GeographicRegion>>, | |
| 112 | + pub network_contribution: Option<f64>, | |
| 113 | + pub custom_conditions: Vec<CustomCondition>, | |
| 114 | +} | |
| 115 | + | |
| 116 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 117 | +pub struct CustomCondition { | |
| 118 | + pub condition_type: String, | |
| 119 | + pub target_value: f64, | |
| 120 | + pub description: String, | |
| 121 | +} | |
| 122 | + | |
| 123 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 124 | +pub struct PerformanceScore { | |
| 125 | + pub volunteer_id: String, | |
| 126 | + pub overall_score: f64, | |
| 127 | + pub component_scores: ComponentScores, | |
| 128 | + pub tier: String, | |
| 129 | + pub rank: u32, | |
| 130 | + pub percentile: f64, | |
| 131 | + pub trend: ScoreTrend, | |
| 132 | + pub last_updated: DateTime<Utc>, | |
| 133 | + pub achievements: Vec<String>, | |
| 134 | + pub active_multipliers: Vec<String>, | |
| 135 | +} | |
| 136 | + | |
| 137 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 138 | +pub struct ComponentScores { | |
| 139 | + pub uptime_score: f64, | |
| 140 | + pub speed_score: f64, | |
| 141 | + pub reliability_score: f64, | |
| 142 | + pub longevity_score: f64, | |
| 143 | + pub contribution_score: f64, | |
| 144 | + pub diversity_score: f64, | |
| 145 | +} | |
| 146 | + | |
| 147 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 148 | +pub enum ScoreTrend { | |
| 149 | + Improving, | |
| 150 | + Stable, | |
| 151 | + Declining, | |
| 152 | +} | |
| 153 | + | |
| 154 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 155 | +pub struct Leaderboard { | |
| 156 | + pub leaderboard_id: String, | |
| 157 | + pub name: String, | |
| 158 | + pub description: String, | |
| 159 | + pub metric: LeaderboardMetric, | |
| 160 | + pub timeframe: LeaderboardTimeframe, | |
| 161 | + pub entries: Vec<LeaderboardEntry>, | |
| 162 | + pub rewards: LeaderboardRewards, | |
| 163 | + pub last_updated: DateTime<Utc>, | |
| 164 | +} | |
| 165 | + | |
| 166 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 167 | +pub enum LeaderboardMetric { | |
| 168 | + OverallScore, | |
| 169 | + Uptime, | |
| 170 | + Speed, | |
| 171 | + Reliability, | |
| 172 | + StorageProvided, | |
| 173 | + EarningsPerGB, | |
| 174 | + NetworkContribution, | |
| 175 | +} | |
| 176 | + | |
| 177 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 178 | +pub enum LeaderboardTimeframe { | |
| 179 | + Daily, | |
| 180 | + Weekly, | |
| 181 | + Monthly, | |
| 182 | + AllTime, | |
| 183 | +} | |
| 184 | + | |
| 185 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 186 | +pub struct LeaderboardEntry { | |
| 187 | + pub rank: u32, | |
| 188 | + pub volunteer_id: String, | |
| 189 | + pub display_name: String, | |
| 190 | + pub value: f64, | |
| 191 | + pub tier: String, | |
| 192 | + pub change_from_previous: i32, | |
| 193 | +} | |
| 194 | + | |
| 195 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 196 | +pub struct LeaderboardRewards { | |
| 197 | + pub top_1_reward: u64, | |
| 198 | + pub top_5_reward: u64, | |
| 199 | + pub top_10_reward: u64, | |
| 200 | + pub top_50_reward: u64, | |
| 201 | + pub participation_reward: u64, | |
| 202 | +} | |
| 203 | + | |
| 204 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 205 | +pub struct Challenge { | |
| 206 | + pub challenge_id: String, | |
| 207 | + pub name: String, | |
| 208 | + pub description: String, | |
| 209 | + pub challenge_type: ChallengeType, | |
| 210 | + pub start_time: DateTime<Utc>, | |
| 211 | + pub end_time: DateTime<Utc>, | |
| 212 | + pub requirements: ChallengeRequirements, | |
| 213 | + pub rewards: ChallengeRewards, | |
| 214 | + pub participants: HashMap<String, ChallengeProgress>, | |
| 215 | + pub max_participants: Option<u32>, | |
| 216 | +} | |
| 217 | + | |
| 218 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 219 | +pub enum ChallengeType { | |
| 220 | + UptimeChallenge, | |
| 221 | + SpeedChallenge, | |
| 222 | + StorageChallenge, | |
| 223 | + CommunityChallenge, | |
| 224 | + SpecialEvent, | |
| 225 | +} | |
| 226 | + | |
| 227 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 228 | +pub struct ChallengeRequirements { | |
| 229 | + pub target_metric: String, | |
| 230 | + pub target_value: f64, | |
| 231 | + pub duration_days: u32, | |
| 232 | + pub min_participation_days: u32, | |
| 233 | + pub geographic_restrictions: Option<Vec<GeographicRegion>>, | |
| 234 | +} | |
| 235 | + | |
| 236 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 237 | +pub struct ChallengeRewards { | |
| 238 | + pub completion_reward: u64, | |
| 239 | + pub milestone_rewards: Vec<MilestoneReward>, | |
| 240 | + pub leaderboard_rewards: Option<LeaderboardRewards>, | |
| 241 | + pub exclusive_achievement: Option<String>, | |
| 242 | +} | |
| 243 | + | |
| 244 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 245 | +pub struct MilestoneReward { | |
| 246 | + pub milestone_percentage: f64, | |
| 247 | + pub reward_tokens: u64, | |
| 248 | + pub title: String, | |
| 249 | +} | |
| 250 | + | |
| 251 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 252 | +pub struct ChallengeProgress { | |
| 253 | + pub volunteer_id: String, | |
| 254 | + pub joined_at: DateTime<Utc>, | |
| 255 | + pub current_progress: f64, | |
| 256 | + pub milestones_achieved: Vec<u32>, | |
| 257 | + pub daily_contributions: HashMap<String, f64>, // date -> contribution | |
| 258 | +} | |
| 259 | + | |
| 260 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 261 | +pub struct RewardMultiplier { | |
| 262 | + pub multiplier_id: String, | |
| 263 | + pub name: String, | |
| 264 | + pub description: String, | |
| 265 | + pub multiplier_value: f64, | |
| 266 | + pub applicable_to: Vec<String>, // volunteer IDs or "all" | |
| 267 | + pub start_time: DateTime<Utc>, | |
| 268 | + pub end_time: DateTime<Utc>, | |
| 269 | + pub conditions: MultiplierConditions, | |
| 270 | +} | |
| 271 | + | |
| 272 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 273 | +pub struct MultiplierConditions { | |
| 274 | + pub min_tier: Option<String>, | |
| 275 | + pub geographic_regions: Option<Vec<GeographicRegion>>, | |
| 276 | + pub time_of_day: Option<(u8, u8)>, // (start_hour, end_hour) | |
| 277 | + pub min_performance_score: Option<f64>, | |
| 278 | + pub network_health_threshold: Option<f64>, | |
| 279 | +} | |
| 280 | + | |
| 281 | +impl Default for ScoringConfiguration { | |
| 282 | + fn default() -> Self { | |
| 283 | + Self { | |
| 284 | + uptime_weight: 0.25, | |
| 285 | + speed_weight: 0.20, | |
| 286 | + reliability_weight: 0.20, | |
| 287 | + longevity_weight: 0.15, | |
| 288 | + contribution_weight: 0.15, | |
| 289 | + diversity_weight: 0.05, | |
| 290 | + daily_window_hours: 24, | |
| 291 | + weekly_window_days: 7, | |
| 292 | + monthly_window_days: 30, | |
| 293 | + excellent_threshold: 90.0, | |
| 294 | + good_threshold: 75.0, | |
| 295 | + average_threshold: 60.0, | |
| 296 | + } | |
| 297 | + } | |
| 298 | +} | |
| 299 | + | |
| 300 | +impl PerformanceRewardsSystem { | |
| 301 | + /// Create new performance rewards system | |
| 302 | + pub fn new() -> Self { | |
| 303 | + let mut system = Self { | |
| 304 | + scoring_config: ScoringConfiguration::default(), | |
| 305 | + reward_tiers: Vec::new(), | |
| 306 | + achievements: HashMap::new(), | |
| 307 | + volunteer_scores: HashMap::new(), | |
| 308 | + leaderboards: HashMap::new(), | |
| 309 | + active_challenges: HashMap::new(), | |
| 310 | + active_multipliers: HashMap::new(), | |
| 311 | + }; | |
| 312 | + | |
| 313 | + system.initialize_reward_tiers(); | |
| 314 | + system.initialize_achievements(); | |
| 315 | + system.initialize_leaderboards(); | |
| 316 | + | |
| 317 | + system | |
| 318 | + } | |
| 319 | + | |
| 320 | + /// Initialize reward tiers | |
| 321 | + fn initialize_reward_tiers(&mut self) { | |
| 322 | + self.reward_tiers = vec![ | |
| 323 | + RewardTier { | |
| 324 | + tier_name: "Diamond".to_string(), | |
| 325 | + min_score: 95.0, | |
| 326 | + max_score: 100.0, | |
| 327 | + multiplier: 2.0, | |
| 328 | + badge_icon: "💎".to_string(), | |
| 329 | + color: "#B9F2FF".to_string(), | |
| 330 | + benefits: vec![ | |
| 331 | + TierBenefit { | |
| 332 | + benefit_type: BenefitType::EarningsMultiplier, | |
| 333 | + value: 2.0, | |
| 334 | + description: "2x earnings multiplier".to_string(), | |
| 335 | + }, | |
| 336 | + TierBenefit { | |
| 337 | + benefit_type: BenefitType::PrioritySupport, | |
| 338 | + value: 1.0, | |
| 339 | + description: "Priority customer support".to_string(), | |
| 340 | + }, | |
| 341 | + TierBenefit { | |
| 342 | + benefit_type: BenefitType::ReducedFees, | |
| 343 | + value: 0.5, | |
| 344 | + description: "50% reduced fees".to_string(), | |
| 345 | + }, | |
| 346 | + ], | |
| 347 | + }, | |
| 348 | + RewardTier { | |
| 349 | + tier_name: "Platinum".to_string(), | |
| 350 | + min_score: 85.0, | |
| 351 | + max_score: 95.0, | |
| 352 | + multiplier: 1.7, | |
| 353 | + badge_icon: "🏆".to_string(), | |
| 354 | + color: "#E5E4E2".to_string(), | |
| 355 | + benefits: vec![ | |
| 356 | + TierBenefit { | |
| 357 | + benefit_type: BenefitType::EarningsMultiplier, | |
| 358 | + value: 1.7, | |
| 359 | + description: "1.7x earnings multiplier".to_string(), | |
| 360 | + }, | |
| 361 | + TierBenefit { | |
| 362 | + benefit_type: BenefitType::EarlyAccess, | |
| 363 | + value: 1.0, | |
| 364 | + description: "Early access to new features".to_string(), | |
| 365 | + }, | |
| 366 | + ], | |
| 367 | + }, | |
| 368 | + RewardTier { | |
| 369 | + tier_name: "Gold".to_string(), | |
| 370 | + min_score: 75.0, | |
| 371 | + max_score: 85.0, | |
| 372 | + multiplier: 1.4, | |
| 373 | + badge_icon: "🥇".to_string(), | |
| 374 | + color: "#FFD700".to_string(), | |
| 375 | + benefits: vec![ | |
| 376 | + TierBenefit { | |
| 377 | + benefit_type: BenefitType::EarningsMultiplier, | |
| 378 | + value: 1.4, | |
| 379 | + description: "1.4x earnings multiplier".to_string(), | |
| 380 | + }, | |
| 381 | + TierBenefit { | |
| 382 | + benefit_type: BenefitType::BonusPayouts, | |
| 383 | + value: 0.1, | |
| 384 | + description: "10% bonus payouts".to_string(), | |
| 385 | + }, | |
| 386 | + ], | |
| 387 | + }, | |
| 388 | + RewardTier { | |
| 389 | + tier_name: "Silver".to_string(), | |
| 390 | + min_score: 60.0, | |
| 391 | + max_score: 75.0, | |
| 392 | + multiplier: 1.2, | |
| 393 | + badge_icon: "🥈".to_string(), | |
| 394 | + color: "#C0C0C0".to_string(), | |
| 395 | + benefits: vec![ | |
| 396 | + TierBenefit { | |
| 397 | + benefit_type: BenefitType::EarningsMultiplier, | |
| 398 | + value: 1.2, | |
| 399 | + description: "1.2x earnings multiplier".to_string(), | |
| 400 | + }, | |
| 401 | + ], | |
| 402 | + }, | |
| 403 | + RewardTier { | |
| 404 | + tier_name: "Bronze".to_string(), | |
| 405 | + min_score: 40.0, | |
| 406 | + max_score: 60.0, | |
| 407 | + multiplier: 1.0, | |
| 408 | + badge_icon: "🥉".to_string(), | |
| 409 | + color: "#CD7F32".to_string(), | |
| 410 | + benefits: vec![ | |
| 411 | + TierBenefit { | |
| 412 | + benefit_type: BenefitType::EarningsMultiplier, | |
| 413 | + value: 1.0, | |
| 414 | + description: "Standard earnings".to_string(), | |
| 415 | + }, | |
| 416 | + ], | |
| 417 | + }, | |
| 418 | + RewardTier { | |
| 419 | + tier_name: "Newcomer".to_string(), | |
| 420 | + min_score: 0.0, | |
| 421 | + max_score: 40.0, | |
| 422 | + multiplier: 0.8, | |
| 423 | + badge_icon: "🌱".to_string(), | |
| 424 | + color: "#90EE90".to_string(), | |
| 425 | + benefits: vec![ | |
| 426 | + TierBenefit { | |
| 427 | + benefit_type: BenefitType::EarningsMultiplier, | |
| 428 | + value: 0.8, | |
| 429 | + description: "Learning bonus - 80% earnings while improving".to_string(), | |
| 430 | + }, | |
| 431 | + ], | |
| 432 | + }, | |
| 433 | + ]; | |
| 434 | + } | |
| 435 | + | |
| 436 | + /// Initialize achievements | |
| 437 | + fn initialize_achievements(&mut self) { | |
| 438 | + let achievements = vec![ | |
| 439 | + Achievement { | |
| 440 | + achievement_id: "first_week".to_string(), | |
| 441 | + name: "First Week Warrior".to_string(), | |
| 442 | + description: "Complete your first week as a volunteer".to_string(), | |
| 443 | + icon: "🎯".to_string(), | |
| 444 | + rarity: AchievementRarity::Common, | |
| 445 | + requirements: AchievementRequirements { | |
| 446 | + min_days_active: Some(7), | |
| 447 | + min_uptime_percentage: Some(80.0), | |
| 448 | + ..Default::default() | |
| 449 | + }, | |
| 450 | + reward_tokens: 5 * 1_000_000_000_000_000_000, // 5 ZEPH | |
| 451 | + reward_multiplier: 1.1, | |
| 452 | + one_time: true, | |
| 453 | + unlocked_by: Vec::new(), | |
| 454 | + }, | |
| 455 | + Achievement { | |
| 456 | + achievement_id: "speed_demon".to_string(), | |
| 457 | + name: "Speed Demon".to_string(), | |
| 458 | + description: "Achieve transfer speeds over 100 Mbps".to_string(), | |
| 459 | + icon: "⚡".to_string(), | |
| 460 | + rarity: AchievementRarity::Uncommon, | |
| 461 | + requirements: AchievementRequirements { | |
| 462 | + min_speed_mbps: Some(100.0), | |
| 463 | + min_reliability_score: Some(95.0), | |
| 464 | + ..Default::default() | |
| 465 | + }, | |
| 466 | + reward_tokens: 10 * 1_000_000_000_000_000_000, // 10 ZEPH | |
| 467 | + reward_multiplier: 1.2, | |
| 468 | + one_time: false, | |
| 469 | + unlocked_by: Vec::new(), | |
| 470 | + }, | |
| 471 | + Achievement { | |
| 472 | + achievement_id: "reliability_champion".to_string(), | |
| 473 | + name: "Reliability Champion".to_string(), | |
| 474 | + description: "Maintain 99.9% uptime for 30 days".to_string(), | |
| 475 | + icon: "🛡️".to_string(), | |
| 476 | + rarity: AchievementRarity::Rare, | |
| 477 | + requirements: AchievementRequirements { | |
| 478 | + min_uptime_percentage: Some(99.9), | |
| 479 | + min_days_active: Some(30), | |
| 480 | + min_reliability_score: Some(99.0), | |
| 481 | + ..Default::default() | |
| 482 | + }, | |
| 483 | + reward_tokens: 25 * 1_000_000_000_000_000_000, // 25 ZEPH | |
| 484 | + reward_multiplier: 1.5, | |
| 485 | + one_time: false, | |
| 486 | + unlocked_by: Vec::new(), | |
| 487 | + }, | |
| 488 | + Achievement { | |
| 489 | + achievement_id: "global_pioneer".to_string(), | |
| 490 | + name: "Global Pioneer".to_string(), | |
| 491 | + description: "First volunteer in an underrepresented region".to_string(), | |
| 492 | + icon: "🌍".to_string(), | |
| 493 | + rarity: AchievementRarity::Epic, | |
| 494 | + requirements: AchievementRequirements { | |
| 495 | + geographic_requirements: Some(vec![GeographicRegion::Rare]), | |
| 496 | + min_days_active: Some(7), | |
| 497 | + ..Default::default() | |
| 498 | + }, | |
| 499 | + reward_tokens: 50 * 1_000_000_000_000_000_000, // 50 ZEPH | |
| 500 | + reward_multiplier: 2.0, | |
| 501 | + one_time: true, | |
| 502 | + unlocked_by: Vec::new(), | |
| 503 | + }, | |
| 504 | + Achievement { | |
| 505 | + achievement_id: "the_vault".to_string(), | |
| 506 | + name: "The Vault".to_string(), | |
| 507 | + description: "Provide 10TB of storage capacity".to_string(), | |
| 508 | + icon: "🏛️".to_string(), | |
| 509 | + rarity: AchievementRarity::Legendary, | |
| 510 | + requirements: AchievementRequirements { | |
| 511 | + min_storage_gb: Some(10_000), | |
| 512 | + min_uptime_percentage: Some(95.0), | |
| 513 | + min_days_active: Some(90), | |
| 514 | + ..Default::default() | |
| 515 | + }, | |
| 516 | + reward_tokens: 100 * 1_000_000_000_000_000_000, // 100 ZEPH | |
| 517 | + reward_multiplier: 3.0, | |
| 518 | + one_time: true, | |
| 519 | + unlocked_by: Vec::new(), | |
| 520 | + }, | |
| 521 | + ]; | |
| 522 | + | |
| 523 | + for achievement in achievements { | |
| 524 | + self.achievements.insert(achievement.achievement_id.clone(), achievement); | |
| 525 | + } | |
| 526 | + } | |
| 527 | + | |
| 528 | + /// Initialize leaderboards | |
| 529 | + fn initialize_leaderboards(&mut self) { | |
| 530 | + let leaderboards = vec![ | |
| 531 | + Leaderboard { | |
| 532 | + leaderboard_id: "overall_weekly".to_string(), | |
| 533 | + name: "Weekly Champions".to_string(), | |
| 534 | + description: "Top performers this week".to_string(), | |
| 535 | + metric: LeaderboardMetric::OverallScore, | |
| 536 | + timeframe: LeaderboardTimeframe::Weekly, | |
| 537 | + entries: Vec::new(), | |
| 538 | + rewards: LeaderboardRewards { | |
| 539 | + top_1_reward: 50 * 1_000_000_000_000_000_000, | |
| 540 | + top_5_reward: 20 * 1_000_000_000_000_000_000, | |
| 541 | + top_10_reward: 10 * 1_000_000_000_000_000_000, | |
| 542 | + top_50_reward: 5 * 1_000_000_000_000_000_000, | |
| 543 | + participation_reward: 1 * 1_000_000_000_000_000_000, | |
| 544 | + }, | |
| 545 | + last_updated: Utc::now(), | |
| 546 | + }, | |
| 547 | + Leaderboard { | |
| 548 | + leaderboard_id: "speed_monthly".to_string(), | |
| 549 | + name: "Speed Masters".to_string(), | |
| 550 | + description: "Fastest transfer speeds this month".to_string(), | |
| 551 | + metric: LeaderboardMetric::Speed, | |
| 552 | + timeframe: LeaderboardTimeframe::Monthly, | |
| 553 | + entries: Vec::new(), | |
| 554 | + rewards: LeaderboardRewards { | |
| 555 | + top_1_reward: 100 * 1_000_000_000_000_000_000, | |
| 556 | + top_5_reward: 40 * 1_000_000_000_000_000_000, | |
| 557 | + top_10_reward: 20 * 1_000_000_000_000_000_000, | |
| 558 | + top_50_reward: 10 * 1_000_000_000_000_000_000, | |
| 559 | + participation_reward: 2 * 1_000_000_000_000_000_000, | |
| 560 | + }, | |
| 561 | + last_updated: Utc::now(), | |
| 562 | + }, | |
| 563 | + ]; | |
| 564 | + | |
| 565 | + for leaderboard in leaderboards { | |
| 566 | + self.leaderboards.insert(leaderboard.leaderboard_id.clone(), leaderboard); | |
| 567 | + } | |
| 568 | + } | |
| 569 | + | |
| 570 | + /// Calculate comprehensive performance score | |
| 571 | + pub fn calculate_performance_score(&self, metrics: &VolunteerMetrics, network_metrics: &NetworkHealthMetrics) -> Result<PerformanceScore> { | |
| 572 | + let config = &self.scoring_config; | |
| 573 | + | |
| 574 | + // Calculate component scores | |
| 575 | + let uptime_score = self.calculate_uptime_score(metrics)?; | |
| 576 | + let speed_score = self.calculate_speed_score(metrics)?; | |
| 577 | + let reliability_score = self.calculate_reliability_score(metrics)?; | |
| 578 | + let longevity_score = self.calculate_longevity_score(metrics)?; | |
| 579 | + let contribution_score = self.calculate_contribution_score(metrics, network_metrics)?; | |
| 580 | + let diversity_score = self.calculate_diversity_score(metrics)?; | |
| 581 | + | |
| 582 | + let component_scores = ComponentScores { | |
| 583 | + uptime_score, | |
| 584 | + speed_score, | |
| 585 | + reliability_score, | |
| 586 | + longevity_score, | |
| 587 | + contribution_score, | |
| 588 | + diversity_score, | |
| 589 | + }; | |
| 590 | + | |
| 591 | + // Calculate weighted overall score | |
| 592 | + let overall_score = uptime_score * config.uptime_weight | |
| 593 | + + speed_score * config.speed_weight | |
| 594 | + + reliability_score * config.reliability_weight | |
| 595 | + + longevity_score * config.longevity_weight | |
| 596 | + + contribution_score * config.contribution_weight | |
| 597 | + + diversity_score * config.diversity_weight; | |
| 598 | + | |
| 599 | + // Determine tier | |
| 600 | + let tier = self.determine_tier(overall_score); | |
| 601 | + | |
| 602 | + // Calculate trend (simplified - would use historical data) | |
| 603 | + let trend = ScoreTrend::Stable; | |
| 604 | + | |
| 605 | + // Get achievements | |
| 606 | + let achievements = self.check_achievements(metrics)?; | |
| 607 | + | |
| 608 | + // Get active multipliers | |
| 609 | + let active_multipliers = self.get_active_multipliers(&metrics.volunteer_id); | |
| 610 | + | |
| 611 | + Ok(PerformanceScore { | |
| 612 | + volunteer_id: metrics.volunteer_id.clone(), | |
| 613 | + overall_score, | |
| 614 | + component_scores, | |
| 615 | + tier, | |
| 616 | + rank: 0, // Would be calculated during ranking | |
| 617 | + percentile: 0.0, // Would be calculated during ranking | |
| 618 | + trend, | |
| 619 | + last_updated: Utc::now(), | |
| 620 | + achievements, | |
| 621 | + active_multipliers, | |
| 622 | + }) | |
| 623 | + } | |
| 624 | + | |
| 625 | + /// Calculate uptime score | |
| 626 | + fn calculate_uptime_score(&self, metrics: &VolunteerMetrics) -> Result<f64> { | |
| 627 | + // Perfect score at 99.9% uptime, linear scaling below | |
| 628 | + let uptime = metrics.uptime_percentage; | |
| 629 | + let score = if uptime >= 99.9 { | |
| 630 | + 100.0 | |
| 631 | + } else if uptime >= 95.0 { | |
| 632 | + 80.0 + (uptime - 95.0) * 4.0 // 80-100 range for 95-99.9% | |
| 633 | + } else if uptime >= 80.0 { | |
| 634 | + 50.0 + (uptime - 80.0) * 2.0 // 50-80 range for 80-95% | |
| 635 | + } else { | |
| 636 | + uptime * 0.625 // 0-50 range for 0-80% | |
| 637 | + }; | |
| 638 | + | |
| 639 | + Ok(score.min(100.0)) | |
| 640 | + } | |
| 641 | + | |
| 642 | + /// Calculate speed score | |
| 643 | + fn calculate_speed_score(&self, metrics: &VolunteerMetrics) -> Result<f64> { | |
| 644 | + let speed = metrics.transfer_speed_mbps; | |
| 645 | + let score = if speed >= 100.0 { | |
| 646 | + 100.0 | |
| 647 | + } else if speed >= 50.0 { | |
| 648 | + 75.0 + (speed - 50.0) * 0.5 // 75-100 for 50-100 Mbps | |
| 649 | + } else if speed >= 10.0 { | |
| 650 | + 40.0 + (speed - 10.0) * 0.875 // 40-75 for 10-50 Mbps | |
| 651 | + } else { | |
| 652 | + speed * 4.0 // 0-40 for 0-10 Mbps | |
| 653 | + }; | |
| 654 | + | |
| 655 | + Ok(score.min(100.0)) | |
| 656 | + } | |
| 657 | + | |
| 658 | + /// Calculate reliability score | |
| 659 | + fn calculate_reliability_score(&self, metrics: &VolunteerMetrics) -> Result<f64> { | |
| 660 | + let total_transfers = metrics.successful_transfers + metrics.failed_transfers; | |
| 661 | + if total_transfers == 0 { | |
| 662 | + return Ok(100.0); // New volunteers get perfect score | |
| 663 | + } | |
| 664 | + | |
| 665 | + let success_rate = metrics.successful_transfers as f64 / total_transfers as f64; | |
| 666 | + let response_time_factor = if metrics.response_time_ms <= 100 { | |
| 667 | + 1.0 | |
| 668 | + } else if metrics.response_time_ms <= 500 { | |
| 669 | + 0.9 | |
| 670 | + } else { | |
| 671 | + 0.8 | |
| 672 | + }; | |
| 673 | + | |
| 674 | + let score = success_rate * 100.0 * response_time_factor; | |
| 675 | + Ok(score.min(100.0)) | |
| 676 | + } | |
| 677 | + | |
| 678 | + /// Calculate longevity score | |
| 679 | + fn calculate_longevity_score(&self, metrics: &VolunteerMetrics) -> Result<f64> { | |
| 680 | + let days_active = (Utc::now() - metrics.joined_at).num_days(); | |
| 681 | + let score = if days_active >= 365 { | |
| 682 | + 100.0 | |
| 683 | + } else if days_active >= 90 { | |
| 684 | + 70.0 + (days_active - 90) as f64 * 30.0 / 275.0 | |
| 685 | + } else if days_active >= 30 { | |
| 686 | + 40.0 + (days_active - 30) as f64 * 30.0 / 60.0 | |
| 687 | + } else if days_active >= 7 { | |
| 688 | + 20.0 + (days_active - 7) as f64 * 20.0 / 23.0 | |
| 689 | + } else { | |
| 690 | + days_active as f64 * 20.0 / 7.0 | |
| 691 | + }; | |
| 692 | + | |
| 693 | + Ok(score.min(100.0)) | |
| 694 | + } | |
| 695 | + | |
| 696 | + /// Calculate contribution score | |
| 697 | + fn calculate_contribution_score(&self, metrics: &VolunteerMetrics, network_metrics: &NetworkHealthMetrics) -> Result<f64> { | |
| 698 | + if network_metrics.total_capacity_gb == 0 { | |
| 699 | + return Ok(0.0); | |
| 700 | + } | |
| 701 | + | |
| 702 | + let contribution_percentage = metrics.total_storage_gb as f64 / network_metrics.total_capacity_gb as f64 * 100.0; | |
| 703 | + let utilization_factor = if metrics.total_storage_gb > 0 { | |
| 704 | + metrics.used_storage_gb as f64 / metrics.total_storage_gb as f64 | |
| 705 | + } else { | |
| 706 | + 0.0 | |
| 707 | + }; | |
| 708 | + | |
| 709 | + // Score based on both absolute contribution and utilization | |
| 710 | + let base_score = contribution_percentage * 1000.0; // Scale up | |
| 711 | + let utilization_bonus = utilization_factor * 20.0; // Up to 20 point bonus | |
| 712 | + | |
| 713 | + let score = (base_score + utilization_bonus).min(100.0); | |
| 714 | + Ok(score) | |
| 715 | + } | |
| 716 | + | |
| 717 | + /// Calculate diversity score | |
| 718 | + fn calculate_diversity_score(&self, metrics: &VolunteerMetrics) -> Result<f64> { | |
| 719 | + let region_bonus = match metrics.geographic_region { | |
| 720 | + GeographicRegion::Rare => 100.0, | |
| 721 | + GeographicRegion::Africa => 80.0, | |
| 722 | + GeographicRegion::SouthAmerica => 70.0, | |
| 723 | + GeographicRegion::Oceania => 70.0, | |
| 724 | + GeographicRegion::Asia => 60.0, | |
| 725 | + GeographicRegion::Europe => 40.0, | |
| 726 | + GeographicRegion::NorthAmerica => 30.0, | |
| 727 | + }; | |
| 728 | + | |
| 729 | + let connection_bonus = match metrics.connection_quality { | |
| 730 | + ConnectionQuality::Excellent => 20.0, | |
| 731 | + ConnectionQuality::Good => 15.0, | |
| 732 | + ConnectionQuality::Fair => 10.0, | |
| 733 | + ConnectionQuality::Poor => 5.0, | |
| 734 | + }; | |
| 735 | + | |
| 736 | + Ok((region_bonus + connection_bonus).min(100.0)) | |
| 737 | + } | |
| 738 | + | |
| 739 | + /// Determine performance tier | |
| 740 | + fn determine_tier(&self, score: f64) -> String { | |
| 741 | + for tier in &self.reward_tiers { | |
| 742 | + if score >= tier.min_score && score <= tier.max_score { | |
| 743 | + return tier.tier_name.clone(); | |
| 744 | + } | |
| 745 | + } | |
| 746 | + "Unranked".to_string() | |
| 747 | + } | |
| 748 | + | |
| 749 | + /// Check for new achievements | |
| 750 | + fn check_achievements(&self, metrics: &VolunteerMetrics) -> Result<Vec<String>> { | |
| 751 | + let mut unlocked_achievements = Vec::new(); | |
| 752 | + | |
| 753 | + for (achievement_id, achievement) in &self.achievements { | |
| 754 | + if achievement.unlocked_by.contains(&metrics.volunteer_id) && achievement.one_time { | |
| 755 | + continue; // Already unlocked | |
| 756 | + } | |
| 757 | + | |
| 758 | + let mut meets_requirements = true; | |
| 759 | + let reqs = &achievement.requirements; | |
| 760 | + | |
| 761 | + // Check uptime requirement | |
| 762 | + if let Some(min_uptime) = reqs.min_uptime_percentage { | |
| 763 | + if metrics.uptime_percentage < min_uptime { | |
| 764 | + meets_requirements = false; | |
| 765 | + } | |
| 766 | + } | |
| 767 | + | |
| 768 | + // Check storage requirement | |
| 769 | + if let Some(min_storage) = reqs.min_storage_gb { | |
| 770 | + if metrics.total_storage_gb < min_storage { | |
| 771 | + meets_requirements = false; | |
| 772 | + } | |
| 773 | + } | |
| 774 | + | |
| 775 | + // Check speed requirement | |
| 776 | + if let Some(min_speed) = reqs.min_speed_mbps { | |
| 777 | + if metrics.transfer_speed_mbps < min_speed { | |
| 778 | + meets_requirements = false; | |
| 779 | + } | |
| 780 | + } | |
| 781 | + | |
| 782 | + // Check reliability requirement | |
| 783 | + if let Some(min_reliability) = reqs.min_reliability_score { | |
| 784 | + if metrics.reliability_score < min_reliability { | |
| 785 | + meets_requirements = false; | |
| 786 | + } | |
| 787 | + } | |
| 788 | + | |
| 789 | + // Check days active requirement | |
| 790 | + if let Some(min_days) = reqs.min_days_active { | |
| 791 | + let days_active = (Utc::now() - metrics.joined_at).num_days(); | |
| 792 | + if days_active < min_days as i64 { | |
| 793 | + meets_requirements = false; | |
| 794 | + } | |
| 795 | + } | |
| 796 | + | |
| 797 | + // Check geographic requirements | |
| 798 | + if let Some(ref required_regions) = reqs.geographic_requirements { | |
| 799 | + if !required_regions.contains(&metrics.geographic_region) { | |
| 800 | + meets_requirements = false; | |
| 801 | + } | |
| 802 | + } | |
| 803 | + | |
| 804 | + if meets_requirements { | |
| 805 | + unlocked_achievements.push(achievement_id.clone()); | |
| 806 | + } | |
| 807 | + } | |
| 808 | + | |
| 809 | + Ok(unlocked_achievements) | |
| 810 | + } | |
| 811 | + | |
| 812 | + /// Get active multipliers for volunteer | |
| 813 | + fn get_active_multipliers(&self, volunteer_id: &str) -> Vec<String> { | |
| 814 | + let now = Utc::now(); | |
| 815 | + | |
| 816 | + self.active_multipliers | |
| 817 | + .values() | |
| 818 | + .filter(|multiplier| { | |
| 819 | + // Check if still active | |
| 820 | + if now < multiplier.start_time || now > multiplier.end_time { | |
| 821 | + return false; | |
| 822 | + } | |
| 823 | + | |
| 824 | + // Check if applies to this volunteer | |
| 825 | + multiplier.applicable_to.contains(&"all".to_string()) || | |
| 826 | + multiplier.applicable_to.contains(volunteer_id) | |
| 827 | + }) | |
| 828 | + .map(|multiplier| multiplier.multiplier_id.clone()) | |
| 829 | + .collect() | |
| 830 | + } | |
| 831 | + | |
| 832 | + /// Update volunteer score | |
| 833 | + pub fn update_volunteer_score(&mut self, volunteer_id: String, score: PerformanceScore) { | |
| 834 | + self.volunteer_scores.insert(volunteer_id, score); | |
| 835 | + } | |
| 836 | + | |
| 837 | + /// Calculate reward multiplier for volunteer | |
| 838 | + pub fn calculate_reward_multiplier(&self, volunteer_id: &str) -> f64 { | |
| 839 | + let base_multiplier = if let Some(score) = self.volunteer_scores.get(volunteer_id) { | |
| 840 | + // Get tier multiplier | |
| 841 | + let tier_multiplier = self.reward_tiers | |
| 842 | + .iter() | |
| 843 | + .find(|tier| tier.tier_name == score.tier) | |
| 844 | + .map(|tier| tier.multiplier) | |
| 845 | + .unwrap_or(1.0); | |
| 846 | + | |
| 847 | + // Add achievement multipliers | |
| 848 | + let achievement_multiplier: f64 = score.achievements | |
| 849 | + .iter() | |
| 850 | + .filter_map(|achievement_id| self.achievements.get(achievement_id)) | |
| 851 | + .map(|achievement| achievement.reward_multiplier - 1.0) | |
| 852 | + .sum::<f64>() + 1.0; | |
| 853 | + | |
| 854 | + tier_multiplier * achievement_multiplier | |
| 855 | + } else { | |
| 856 | + 1.0 | |
| 857 | + }; | |
| 858 | + | |
| 859 | + // Apply active multipliers | |
| 860 | + let active_multiplier: f64 = self.active_multipliers | |
| 861 | + .values() | |
| 862 | + .filter(|multiplier| { | |
| 863 | + let now = Utc::now(); | |
| 864 | + now >= multiplier.start_time && now <= multiplier.end_time && | |
| 865 | + (multiplier.applicable_to.contains(&"all".to_string()) || | |
| 866 | + multiplier.applicable_to.contains(volunteer_id)) | |
| 867 | + }) | |
| 868 | + .map(|multiplier| multiplier.multiplier_value) | |
| 869 | + .product(); | |
| 870 | + | |
| 871 | + base_multiplier * active_multiplier | |
| 872 | + } | |
| 873 | + | |
| 874 | + /// Update leaderboards | |
| 875 | + pub fn update_leaderboards(&mut self) -> Result<()> { | |
| 876 | + for leaderboard in self.leaderboards.values_mut() { | |
| 877 | + let mut entries: Vec<LeaderboardEntry> = self.volunteer_scores | |
| 878 | + .values() | |
| 879 | + .map(|score| { | |
| 880 | + let value = match leaderboard.metric { | |
| 881 | + LeaderboardMetric::OverallScore => score.overall_score, | |
| 882 | + LeaderboardMetric::Uptime => score.component_scores.uptime_score, | |
| 883 | + LeaderboardMetric::Speed => score.component_scores.speed_score, | |
| 884 | + LeaderboardMetric::Reliability => score.component_scores.reliability_score, | |
| 885 | + LeaderboardMetric::StorageProvided => score.component_scores.contribution_score, | |
| 886 | + LeaderboardMetric::EarningsPerGB => score.overall_score, // Simplified | |
| 887 | + LeaderboardMetric::NetworkContribution => score.component_scores.contribution_score, | |
| 888 | + }; | |
| 889 | + | |
| 890 | + LeaderboardEntry { | |
| 891 | + rank: 0, // Will be set after sorting | |
| 892 | + volunteer_id: score.volunteer_id.clone(), | |
| 893 | + display_name: format!("Volunteer_{}", &score.volunteer_id[..8]), | |
| 894 | + value, | |
| 895 | + tier: score.tier.clone(), | |
| 896 | + change_from_previous: 0, // Would track changes | |
| 897 | + } | |
| 898 | + }) | |
| 899 | + .collect(); | |
| 900 | + | |
| 901 | + // Sort by value (descending) | |
| 902 | + entries.sort_by(|a, b| b.value.partial_cmp(&a.value).unwrap_or(std::cmp::Ordering::Equal)); | |
| 903 | + | |
| 904 | + // Assign ranks | |
| 905 | + for (index, entry) in entries.iter_mut().enumerate() { | |
| 906 | + entry.rank = (index + 1) as u32; | |
| 907 | + } | |
| 908 | + | |
| 909 | + leaderboard.entries = entries; | |
| 910 | + leaderboard.last_updated = Utc::now(); | |
| 911 | + } | |
| 912 | + | |
| 913 | + Ok(()) | |
| 914 | + } | |
| 915 | + | |
| 916 | + /// Create new challenge | |
| 917 | + pub fn create_challenge(&mut self, challenge: Challenge) -> Result<()> { | |
| 918 | + let challenge_id = challenge.challenge_id.clone(); | |
| 919 | + self.active_challenges.insert(challenge_id, challenge); | |
| 920 | + Ok(()) | |
| 921 | + } | |
| 922 | + | |
| 923 | + /// Add reward multiplier | |
| 924 | + pub fn add_reward_multiplier(&mut self, multiplier: RewardMultiplier) -> Result<()> { | |
| 925 | + let multiplier_id = multiplier.multiplier_id.clone(); | |
| 926 | + self.active_multipliers.insert(multiplier_id, multiplier); | |
| 927 | + Ok(()) | |
| 928 | + } | |
| 929 | + | |
| 930 | + /// Get volunteer's current tier and benefits | |
| 931 | + pub fn get_volunteer_tier_info(&self, volunteer_id: &str) -> Option<(String, Vec<TierBenefit>)> { | |
| 932 | + let score = self.volunteer_scores.get(volunteer_id)?; | |
| 933 | + let tier = self.reward_tiers | |
| 934 | + .iter() | |
| 935 | + .find(|t| t.tier_name == score.tier)?; | |
| 936 | + | |
| 937 | + Some((tier.tier_name.clone(), tier.benefits.clone())) | |
| 938 | + } | |
| 939 | + | |
| 940 | + /// Get leaderboard for display | |
| 941 | + pub fn get_leaderboard(&self, leaderboard_id: &str) -> Option<&Leaderboard> { | |
| 942 | + self.leaderboards.get(leaderboard_id) | |
| 943 | + } | |
| 944 | +} | |
| 945 | + | |
| 946 | +impl Default for AchievementRequirements { | |
| 947 | + fn default() -> Self { | |
| 948 | + Self { | |
| 949 | + min_uptime_percentage: None, | |
| 950 | + min_storage_gb: None, | |
| 951 | + min_speed_mbps: None, | |
| 952 | + min_reliability_score: None, | |
| 953 | + min_days_active: None, | |
| 954 | + geographic_requirements: None, | |
| 955 | + network_contribution: None, | |
| 956 | + custom_conditions: Vec::new(), | |
| 957 | + } | |
| 958 | + } | |
| 959 | +} | |
| 960 | + | |
| 961 | +#[cfg(test)] | |
| 962 | +mod tests { | |
| 963 | + use super::*; | |
| 964 | + | |
| 965 | + #[test] | |
| 966 | + fn test_performance_rewards_system() { | |
| 967 | + let system = PerformanceRewardsSystem::new(); | |
| 968 | + assert!(!system.reward_tiers.is_empty()); | |
| 969 | + assert!(!system.achievements.is_empty()); | |
| 970 | + assert!(!system.leaderboards.is_empty()); | |
| 971 | + } | |
| 972 | + | |
| 973 | + #[test] | |
| 974 | + fn test_score_calculation() { | |
| 975 | + let system = PerformanceRewardsSystem::new(); | |
| 976 | + | |
| 977 | + let metrics = VolunteerMetrics { | |
| 978 | + volunteer_id: "test_volunteer".to_string(), | |
| 979 | + total_storage_gb: 100, | |
| 980 | + available_storage_gb: 50, | |
| 981 | + used_storage_gb: 50, | |
| 982 | + uptime_hours_24h: 24.0, | |
| 983 | + uptime_percentage: 99.5, | |
| 984 | + response_time_ms: 50, | |
| 985 | + transfer_speed_mbps: 100.0, | |
| 986 | + successful_transfers: 1000, | |
| 987 | + failed_transfers: 5, | |
| 988 | + geographic_region: GeographicRegion::Europe, | |
| 989 | + connection_quality: ConnectionQuality::Excellent, | |
| 990 | + reliability_score: 99.5, | |
| 991 | + last_seen: Utc::now(), | |
| 992 | + joined_at: Utc::now() - Duration::days(365), | |
| 993 | + }; | |
| 994 | + | |
| 995 | + let network_metrics = NetworkHealthMetrics { | |
| 996 | + total_capacity_gb: 10000, | |
| 997 | + active_volunteers: 100, | |
| 998 | + utilization_rate: 75.0, | |
| 999 | + average_uptime: 95.0, | |
| 1000 | + geographic_diversity: 70.0, | |
| 1001 | + data_durability: 99.9, | |
| 1002 | + }; | |
| 1003 | + | |
| 1004 | + let score = system.calculate_performance_score(&metrics, &network_metrics).unwrap(); | |
| 1005 | + assert!(score.overall_score > 80.0); // Should be high-performing | |
| 1006 | + assert_eq!(score.tier, "Platinum"); // Should be in a high tier | |
| 1007 | + } | |
| 1008 | + | |
| 1009 | + #[test] | |
| 1010 | + fn test_achievement_checking() { | |
| 1011 | + let system = PerformanceRewardsSystem::new(); | |
| 1012 | + | |
| 1013 | + let high_speed_metrics = VolunteerMetrics { | |
| 1014 | + volunteer_id: "speed_test".to_string(), | |
| 1015 | + total_storage_gb: 100, | |
| 1016 | + available_storage_gb: 0, | |
| 1017 | + used_storage_gb: 100, | |
| 1018 | + uptime_hours_24h: 24.0, | |
| 1019 | + uptime_percentage: 99.0, | |
| 1020 | + response_time_ms: 25, | |
| 1021 | + transfer_speed_mbps: 150.0, | |
| 1022 | + successful_transfers: 1000, | |
| 1023 | + failed_transfers: 1, | |
| 1024 | + geographic_region: GeographicRegion::NorthAmerica, | |
| 1025 | + connection_quality: ConnectionQuality::Excellent, | |
| 1026 | + reliability_score: 99.9, | |
| 1027 | + last_seen: Utc::now(), | |
| 1028 | + joined_at: Utc::now() - Duration::days(30), | |
| 1029 | + }; | |
| 1030 | + | |
| 1031 | + let achievements = system.check_achievements(&high_speed_metrics).unwrap(); | |
| 1032 | + assert!(achievements.contains(&"speed_demon".to_string())); | |
| 1033 | + } | |
| 1034 | +} | |
src/economics/token_model.rsadded@@ -0,0 +1,398 @@ | ||
| 1 | +//! ZephyrCoin Token Economics Model | |
| 2 | +//! | |
| 3 | +//! Sustainable token supply with network-health-based mechanisms | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::HashMap; | |
| 8 | +use chrono::{DateTime, Utc, Duration}; | |
| 9 | + | |
| 10 | +/// ZephyrCoin token economics configuration | |
| 11 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 12 | +pub struct TokenEconomics { | |
| 13 | + /// Total supply cap (21M tokens, Bitcoin-inspired scarcity) | |
| 14 | + pub max_supply: u64, | |
| 15 | + /// Current circulating supply | |
| 16 | + pub circulating_supply: u64, | |
| 17 | + /// Tokens reserved for ecosystem development (10%) | |
| 18 | + pub ecosystem_reserve: u64, | |
| 19 | + /// Tokens allocated for volunteer rewards (70%) | |
| 20 | + pub volunteer_rewards_pool: u64, | |
| 21 | + /// Tokens for network maintenance and operations (20%) | |
| 22 | + pub operations_pool: u64, | |
| 23 | + /// Minimum network capacity before token minting | |
| 24 | + pub min_network_capacity_gb: u64, | |
| 25 | + /// Base reward rate per GB per day (in wei-equivalent) | |
| 26 | + pub base_reward_rate: u64, | |
| 27 | + /// Inflation rate control parameters | |
| 28 | + pub inflation_control: InflationControl, | |
| 29 | +} | |
| 30 | + | |
| 31 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 32 | +pub struct InflationControl { | |
| 33 | + /// Maximum annual inflation rate (2.5%) | |
| 34 | + pub max_annual_inflation: f64, | |
| 35 | + /// Target network growth rate (20% monthly) | |
| 36 | + pub target_network_growth: f64, | |
| 37 | + /// Supply adjustment frequency (weekly) | |
| 38 | + pub adjustment_frequency_days: u32, | |
| 39 | + /// Burn rate for unused tokens (quarterly) | |
| 40 | + pub burn_rate: f64, | |
| 41 | +} | |
| 42 | + | |
| 43 | +/// Token supply management | |
| 44 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 45 | +pub struct TokenSupply { | |
| 46 | + /// Available tokens for immediate rewards | |
| 47 | + pub reward_pool: u64, | |
| 48 | + /// Tokens locked for future minting | |
| 49 | + pub locked_reserve: u64, | |
| 50 | + /// Burned tokens (deflationary mechanism) | |
| 51 | + pub burned_tokens: u64, | |
| 52 | + /// Last supply adjustment timestamp | |
| 53 | + pub last_adjustment: DateTime<Utc>, | |
| 54 | +} | |
| 55 | + | |
| 56 | +/// Network health metrics for token economics | |
| 57 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 58 | +pub struct NetworkHealthMetrics { | |
| 59 | + /// Total network storage capacity in GB | |
| 60 | + pub total_capacity_gb: u64, | |
| 61 | + /// Active volunteer nodes | |
| 62 | + pub active_volunteers: u32, | |
| 63 | + /// Network utilization percentage (0-100) | |
| 64 | + pub utilization_rate: f64, | |
| 65 | + /// Average node uptime percentage | |
| 66 | + pub average_uptime: f64, | |
| 67 | + /// Geographic distribution score (0-100) | |
| 68 | + pub geographic_diversity: f64, | |
| 69 | + /// Data durability percentage | |
| 70 | + pub data_durability: f64, | |
| 71 | +} | |
| 72 | + | |
| 73 | +/// Token economics manager | |
| 74 | +pub struct TokenEconomicsManager { | |
| 75 | + config: TokenEconomics, | |
| 76 | + supply: TokenSupply, | |
| 77 | + health_metrics: NetworkHealthMetrics, | |
| 78 | + reward_history: HashMap<String, Vec<RewardRecord>>, | |
| 79 | +} | |
| 80 | + | |
| 81 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 82 | +pub struct RewardRecord { | |
| 83 | + pub volunteer_id: String, | |
| 84 | + pub amount: u64, | |
| 85 | + pub earned_at: DateTime<Utc>, | |
| 86 | + pub reason: RewardReason, | |
| 87 | +} | |
| 88 | + | |
| 89 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 90 | +pub enum RewardReason { | |
| 91 | + StorageProvision { gb_days: u64 }, | |
| 92 | + UptimeBonus { uptime_hours: u32 }, | |
| 93 | + PerformanceBonus { score: f64 }, | |
| 94 | + GeographicDiversity, | |
| 95 | + NetworkStabilization, | |
| 96 | +} | |
| 97 | + | |
| 98 | +impl Default for TokenEconomics { | |
| 99 | + fn default() -> Self { | |
| 100 | + Self { | |
| 101 | + max_supply: 21_000_000 * 1_000_000_000_000_000_000, // 21M tokens with 18 decimals | |
| 102 | + circulating_supply: 0, | |
| 103 | + ecosystem_reserve: 2_100_000 * 1_000_000_000_000_000_000, // 10% | |
| 104 | + volunteer_rewards_pool: 14_700_000 * 1_000_000_000_000_000_000, // 70% | |
| 105 | + operations_pool: 4_200_000 * 1_000_000_000_000_000_000, // 20% | |
| 106 | + min_network_capacity_gb: 100, // Start rewards at 100GB network capacity | |
| 107 | + base_reward_rate: 20_000_000_000_000_000, // 0.02 tokens per GB per day | |
| 108 | + inflation_control: InflationControl { | |
| 109 | + max_annual_inflation: 0.025, // 2.5% | |
| 110 | + target_network_growth: 0.20, // 20% | |
| 111 | + adjustment_frequency_days: 7, | |
| 112 | + burn_rate: 0.01, // 1% quarterly burn of unused tokens | |
| 113 | + }, | |
| 114 | + } | |
| 115 | + } | |
| 116 | +} | |
| 117 | + | |
| 118 | +impl TokenEconomicsManager { | |
| 119 | + /// Create new token economics manager | |
| 120 | + pub fn new(config: TokenEconomics) -> Self { | |
| 121 | + Self { | |
| 122 | + config, | |
| 123 | + supply: TokenSupply { | |
| 124 | + reward_pool: 0, | |
| 125 | + locked_reserve: config.volunteer_rewards_pool, | |
| 126 | + burned_tokens: 0, | |
| 127 | + last_adjustment: Utc::now(), | |
| 128 | + }, | |
| 129 | + health_metrics: NetworkHealthMetrics { | |
| 130 | + total_capacity_gb: 0, | |
| 131 | + active_volunteers: 0, | |
| 132 | + utilization_rate: 0.0, | |
| 133 | + average_uptime: 0.0, | |
| 134 | + geographic_diversity: 0.0, | |
| 135 | + data_durability: 0.0, | |
| 136 | + }, | |
| 137 | + reward_history: HashMap::new(), | |
| 138 | + } | |
| 139 | + } | |
| 140 | + | |
| 141 | + /// Calculate sustainable token release rate | |
| 142 | + pub fn calculate_token_release_rate(&self) -> Result<u64> { | |
| 143 | + // Base release tied to network growth | |
| 144 | + let network_growth_factor = self.calculate_network_growth_factor()?; | |
| 145 | + let utilization_factor = (self.health_metrics.utilization_rate / 100.0).min(1.0); | |
| 146 | + let quality_factor = self.calculate_network_quality_factor(); | |
| 147 | + | |
| 148 | + // Release rate formula: base_rate * growth_factor * utilization_factor * quality_factor | |
| 149 | + let daily_release = (self.config.base_reward_rate as f64 | |
| 150 | + * self.health_metrics.total_capacity_gb as f64 | |
| 151 | + * network_growth_factor | |
| 152 | + * utilization_factor | |
| 153 | + * quality_factor) as u64; | |
| 154 | + | |
| 155 | + // Cap by inflation control | |
| 156 | + let max_daily_inflation = self.calculate_max_daily_inflation()?; | |
| 157 | + Ok(daily_release.min(max_daily_inflation)) | |
| 158 | + } | |
| 159 | + | |
| 160 | + /// Calculate network growth factor for token release | |
| 161 | + fn calculate_network_growth_factor(&self) -> Result<f64> { | |
| 162 | + // Encourage growth but prevent runaway inflation | |
| 163 | + let growth_factor = if self.health_metrics.total_capacity_gb < 1000 { | |
| 164 | + 2.0 // High incentive for early adoption | |
| 165 | + } else if self.health_metrics.total_capacity_gb < 10000 { | |
| 166 | + 1.5 // Moderate incentive for scaling | |
| 167 | + } else { | |
| 168 | + 1.0 // Stable rewards for mature network | |
| 169 | + }; | |
| 170 | + | |
| 171 | + Ok(growth_factor) | |
| 172 | + } | |
| 173 | + | |
| 174 | + /// Calculate network quality factor | |
| 175 | + fn calculate_network_quality_factor(&self) -> f64 { | |
| 176 | + let uptime_factor = self.health_metrics.average_uptime / 100.0; | |
| 177 | + let diversity_factor = self.health_metrics.geographic_diversity / 100.0; | |
| 178 | + let durability_factor = self.health_metrics.data_durability / 100.0; | |
| 179 | + | |
| 180 | + // Weighted average: uptime (40%), diversity (30%), durability (30%) | |
| 181 | + (uptime_factor * 0.4 + diversity_factor * 0.3 + durability_factor * 0.3).max(0.1) | |
| 182 | + } | |
| 183 | + | |
| 184 | + /// Calculate maximum daily inflation allowed | |
| 185 | + fn calculate_max_daily_inflation(&self) -> Result<u64> { | |
| 186 | + let annual_max = (self.config.circulating_supply as f64 | |
| 187 | + * self.config.inflation_control.max_annual_inflation) as u64; | |
| 188 | + Ok(annual_max / 365) | |
| 189 | + } | |
| 190 | + | |
| 191 | + /// Mint new tokens for rewards | |
| 192 | + pub async fn mint_rewards(&mut self, amount: u64) -> Result<()> { | |
| 193 | + // Verify supply constraints | |
| 194 | + if self.config.circulating_supply + amount > self.config.max_supply { | |
| 195 | + return Err(anyhow::anyhow!("Minting would exceed maximum supply")); | |
| 196 | + } | |
| 197 | + | |
| 198 | + // Verify inflation constraints | |
| 199 | + let max_daily = self.calculate_max_daily_inflation()?; | |
| 200 | + if amount > max_daily { | |
| 201 | + return Err(anyhow::anyhow!("Minting exceeds daily inflation limit")); | |
| 202 | + } | |
| 203 | + | |
| 204 | + // Mint tokens | |
| 205 | + self.supply.reward_pool += amount; | |
| 206 | + self.config.circulating_supply += amount; | |
| 207 | + self.supply.locked_reserve = self.supply.locked_reserve.saturating_sub(amount); | |
| 208 | + | |
| 209 | + tracing::info!("Minted {} tokens for rewards. Circulating supply: {}", | |
| 210 | + amount, self.config.circulating_supply); | |
| 211 | + | |
| 212 | + Ok(()) | |
| 213 | + } | |
| 214 | + | |
| 215 | + /// Burn unused tokens (deflationary mechanism) | |
| 216 | + pub async fn burn_unused_tokens(&mut self) -> Result<u64> { | |
| 217 | + let days_since_adjustment = (Utc::now() - self.supply.last_adjustment).num_days(); | |
| 218 | + | |
| 219 | + // Quarterly burn | |
| 220 | + if days_since_adjustment >= 90 { | |
| 221 | + let burn_amount = (self.supply.reward_pool as f64 | |
| 222 | + * self.config.inflation_control.burn_rate) as u64; | |
| 223 | + | |
| 224 | + if burn_amount > 0 { | |
| 225 | + self.supply.reward_pool = self.supply.reward_pool.saturating_sub(burn_amount); | |
| 226 | + self.supply.burned_tokens += burn_amount; | |
| 227 | + self.config.circulating_supply = self.config.circulating_supply.saturating_sub(burn_amount); | |
| 228 | + | |
| 229 | + tracing::info!("Burned {} unused tokens. Total burned: {}", | |
| 230 | + burn_amount, self.supply.burned_tokens); | |
| 231 | + | |
| 232 | + return Ok(burn_amount); | |
| 233 | + } | |
| 234 | + } | |
| 235 | + | |
| 236 | + Ok(0) | |
| 237 | + } | |
| 238 | + | |
| 239 | + /// Calculate reward for volunteer | |
| 240 | + pub fn calculate_volunteer_reward( | |
| 241 | + &self, | |
| 242 | + volunteer_id: &str, | |
| 243 | + storage_gb: u64, | |
| 244 | + uptime_hours: u32, | |
| 245 | + performance_score: f64 | |
| 246 | + ) -> Result<u64> { | |
| 247 | + // Base storage reward | |
| 248 | + let storage_reward = storage_gb * self.config.base_reward_rate; | |
| 249 | + | |
| 250 | + // Uptime bonus (up to 50% extra) | |
| 251 | + let uptime_bonus = if uptime_hours >= 24 { | |
| 252 | + (storage_reward as f64 * 0.5) as u64 | |
| 253 | + } else { | |
| 254 | + (storage_reward as f64 * (uptime_hours as f64 / 24.0) * 0.5) as u64 | |
| 255 | + }; | |
| 256 | + | |
| 257 | + // Performance bonus (up to 25% extra) | |
| 258 | + let performance_bonus = (storage_reward as f64 * performance_score * 0.25) as u64; | |
| 259 | + | |
| 260 | + // Geographic diversity bonus | |
| 261 | + let diversity_bonus = if self.is_rare_location(volunteer_id) { | |
| 262 | + (storage_reward as f64 * 0.15) as u64 | |
| 263 | + } else { | |
| 264 | + 0 | |
| 265 | + }; | |
| 266 | + | |
| 267 | + let total_reward = storage_reward + uptime_bonus + performance_bonus + diversity_bonus; | |
| 268 | + | |
| 269 | + // Ensure we have enough tokens in reward pool | |
| 270 | + if total_reward > self.supply.reward_pool { | |
| 271 | + return Err(anyhow::anyhow!("Insufficient tokens in reward pool")); | |
| 272 | + } | |
| 273 | + | |
| 274 | + Ok(total_reward) | |
| 275 | + } | |
| 276 | + | |
| 277 | + /// Check if volunteer is in a geographically rare location | |
| 278 | + fn is_rare_location(&self, _volunteer_id: &str) -> bool { | |
| 279 | + // Placeholder for geographic diversity calculation | |
| 280 | + // Would integrate with actual geographic distribution data | |
| 281 | + false | |
| 282 | + } | |
| 283 | + | |
| 284 | + /// Distribute reward to volunteer | |
| 285 | + pub async fn distribute_reward( | |
| 286 | + &mut self, | |
| 287 | + volunteer_id: String, | |
| 288 | + amount: u64, | |
| 289 | + reason: RewardReason | |
| 290 | + ) -> Result<()> { | |
| 291 | + if amount > self.supply.reward_pool { | |
| 292 | + return Err(anyhow::anyhow!("Insufficient reward pool balance")); | |
| 293 | + } | |
| 294 | + | |
| 295 | + // Deduct from reward pool | |
| 296 | + self.supply.reward_pool -= amount; | |
| 297 | + | |
| 298 | + // Record reward | |
| 299 | + let record = RewardRecord { | |
| 300 | + volunteer_id: volunteer_id.clone(), | |
| 301 | + amount, | |
| 302 | + earned_at: Utc::now(), | |
| 303 | + reason, | |
| 304 | + }; | |
| 305 | + | |
| 306 | + self.reward_history.entry(volunteer_id) | |
| 307 | + .or_insert_with(Vec::new) | |
| 308 | + .push(record); | |
| 309 | + | |
| 310 | + tracing::info!("Distributed {} tokens to volunteer. Remaining pool: {}", | |
| 311 | + amount, self.supply.reward_pool); | |
| 312 | + | |
| 313 | + Ok(()) | |
| 314 | + } | |
| 315 | + | |
| 316 | + /// Update network health metrics | |
| 317 | + pub fn update_network_metrics(&mut self, metrics: NetworkHealthMetrics) { | |
| 318 | + self.health_metrics = metrics; | |
| 319 | + } | |
| 320 | + | |
| 321 | + /// Get current token supply status | |
| 322 | + pub fn get_supply_status(&self) -> TokenSupply { | |
| 323 | + self.supply.clone() | |
| 324 | + } | |
| 325 | + | |
| 326 | + /// Get total value locked in the system | |
| 327 | + pub fn get_total_value_locked(&self) -> u64 { | |
| 328 | + self.supply.reward_pool + self.supply.locked_reserve | |
| 329 | + } | |
| 330 | + | |
| 331 | + /// Perform periodic supply adjustment | |
| 332 | + pub async fn perform_supply_adjustment(&mut self) -> Result<()> { | |
| 333 | + let days_since_adjustment = (Utc::now() - self.supply.last_adjustment).num_days(); | |
| 334 | + | |
| 335 | + if days_since_adjustment >= self.config.inflation_control.adjustment_frequency_days as i64 { | |
| 336 | + // Calculate and mint new rewards based on network health | |
| 337 | + let daily_release = self.calculate_token_release_rate()?; | |
| 338 | + let adjustment_amount = daily_release * days_since_adjustment as u64; | |
| 339 | + | |
| 340 | + if adjustment_amount > 0 { | |
| 341 | + self.mint_rewards(adjustment_amount).await?; | |
| 342 | + } | |
| 343 | + | |
| 344 | + // Perform quarterly burn | |
| 345 | + self.burn_unused_tokens().await?; | |
| 346 | + | |
| 347 | + self.supply.last_adjustment = Utc::now(); | |
| 348 | + | |
| 349 | + tracing::info!("Performed supply adjustment: +{} tokens", adjustment_amount); | |
| 350 | + } | |
| 351 | + | |
| 352 | + Ok(()) | |
| 353 | + } | |
| 354 | +} | |
| 355 | + | |
| 356 | +#[cfg(test)] | |
| 357 | +mod tests { | |
| 358 | + use super::*; | |
| 359 | + | |
| 360 | + #[tokio::test] | |
| 361 | + async fn test_token_economics_basic() { | |
| 362 | + let config = TokenEconomics::default(); | |
| 363 | + let mut manager = TokenEconomicsManager::new(config); | |
| 364 | + | |
| 365 | + // Test initial state | |
| 366 | + assert_eq!(manager.get_supply_status().reward_pool, 0); | |
| 367 | + assert_eq!(manager.config.circulating_supply, 0); | |
| 368 | + | |
| 369 | + // Test minting | |
| 370 | + manager.mint_rewards(1000).await.unwrap(); | |
| 371 | + assert_eq!(manager.get_supply_status().reward_pool, 1000); | |
| 372 | + assert_eq!(manager.config.circulating_supply, 1000); | |
| 373 | + } | |
| 374 | + | |
| 375 | + #[tokio::test] | |
| 376 | + async fn test_reward_calculation() { | |
| 377 | + let config = TokenEconomics::default(); | |
| 378 | + let manager = TokenEconomicsManager::new(config); | |
| 379 | + | |
| 380 | + let reward = manager.calculate_volunteer_reward("test_volunteer", 10, 24, 0.8).unwrap(); | |
| 381 | + | |
| 382 | + // Base: 10 GB * 0.02 tokens = 0.2 tokens | |
| 383 | + // Uptime bonus: 50% of base = 0.1 tokens | |
| 384 | + // Performance bonus: 80% * 25% of base = 0.04 tokens | |
| 385 | + // Total should be around 0.34 tokens (in wei) | |
| 386 | + assert!(reward > 0); | |
| 387 | + } | |
| 388 | + | |
| 389 | + #[tokio::test] | |
| 390 | + async fn test_supply_constraints() { | |
| 391 | + let mut config = TokenEconomics::default(); | |
| 392 | + config.max_supply = 1000; // Small cap for testing | |
| 393 | + let mut manager = TokenEconomicsManager::new(config); | |
| 394 | + | |
| 395 | + // Should fail when exceeding max supply | |
| 396 | + assert!(manager.mint_rewards(2000).await.is_err()); | |
| 397 | + } | |
| 398 | +} | |
src/economics/zephyr_coin.rsadded@@ -0,0 +1,561 @@ | ||
| 1 | +//! ZephyrCoin Smart Contract Implementation | |
| 2 | +//! | |
| 3 | +//! ERC-20 compatible token for ZephyrFS network incentives | |
| 4 | + | |
| 5 | +use anyhow::Result; | |
| 6 | +use serde::{Deserialize, Serialize}; | |
| 7 | +use std::collections::HashMap; | |
| 8 | +use chrono::{DateTime, Utc}; | |
| 9 | + | |
| 10 | +/// ZephyrCoin token contract state | |
| 11 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 12 | +pub struct ZephyrCoin { | |
| 13 | + /// Token metadata | |
| 14 | + pub name: String, | |
| 15 | + pub symbol: String, | |
| 16 | + pub decimals: u8, | |
| 17 | + pub total_supply: u64, | |
| 18 | + | |
| 19 | + /// Balance tracking | |
| 20 | + pub balances: HashMap<String, u64>, | |
| 21 | + pub allowances: HashMap<String, HashMap<String, u64>>, | |
| 22 | + | |
| 23 | + /// ZephyrFS-specific features | |
| 24 | + pub contract_owner: String, | |
| 25 | + pub minters: HashMap<String, bool>, | |
| 26 | + pub burners: HashMap<String, bool>, | |
| 27 | + pub paused: bool, | |
| 28 | + | |
| 29 | + /// Economic controls | |
| 30 | + pub daily_mint_limit: u64, | |
| 31 | + pub daily_minted: u64, | |
| 32 | + pub last_mint_reset: DateTime<Utc>, | |
| 33 | + | |
| 34 | + /// Staking and governance | |
| 35 | + pub staked_balances: HashMap<String, StakedBalance>, | |
| 36 | + pub governance_proposals: HashMap<u64, GovernanceProposal>, | |
| 37 | + pub proposal_counter: u64, | |
| 38 | +} | |
| 39 | + | |
| 40 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 41 | +pub struct StakedBalance { | |
| 42 | + pub amount: u64, | |
| 43 | + pub staked_at: DateTime<Utc>, | |
| 44 | + pub unlock_time: DateTime<Utc>, | |
| 45 | + pub rewards_claimed: u64, | |
| 46 | +} | |
| 47 | + | |
| 48 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 49 | +pub struct GovernanceProposal { | |
| 50 | + pub id: u64, | |
| 51 | + pub proposer: String, | |
| 52 | + pub title: String, | |
| 53 | + pub description: String, | |
| 54 | + pub target_contract: Option<String>, | |
| 55 | + pub call_data: Option<Vec<u8>>, | |
| 56 | + pub created_at: DateTime<Utc>, | |
| 57 | + pub voting_ends_at: DateTime<Utc>, | |
| 58 | + pub votes_for: u64, | |
| 59 | + pub votes_against: u64, | |
| 60 | + pub executed: bool, | |
| 61 | + pub voters: HashMap<String, Vote>, | |
| 62 | +} | |
| 63 | + | |
| 64 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 65 | +pub enum Vote { | |
| 66 | + For(u64), // Amount of tokens voted | |
| 67 | + Against(u64), | |
| 68 | +} | |
| 69 | + | |
| 70 | +/// ERC-20 Events | |
| 71 | +#[derive(Debug, Clone, Serialize, Deserialize)] | |
| 72 | +pub enum TokenEvent { | |
| 73 | + Transfer { | |
| 74 | + from: String, | |
| 75 | + to: String, | |
| 76 | + value: u64, | |
| 77 | + }, | |
| 78 | + Approval { | |
| 79 | + owner: String, | |
| 80 | + spender: String, | |
| 81 | + value: u64, | |
| 82 | + }, | |
| 83 | + Mint { | |
| 84 | + to: String, | |
| 85 | + value: u64, | |
| 86 | + }, | |
| 87 | + Burn { | |
| 88 | + from: String, | |
| 89 | + value: u64, | |
| 90 | + }, | |
| 91 | + Stake { | |
| 92 | + user: String, | |
| 93 | + amount: u64, | |
| 94 | + duration_days: u32, | |
| 95 | + }, | |
| 96 | + Unstake { | |
| 97 | + user: String, | |
| 98 | + amount: u64, | |
| 99 | + rewards: u64, | |
| 100 | + }, | |
| 101 | + ProposalCreated { | |
| 102 | + id: u64, | |
| 103 | + proposer: String, | |
| 104 | + title: String, | |
| 105 | + }, | |
| 106 | + VoteCast { | |
| 107 | + proposal_id: u64, | |
| 108 | + voter: String, | |
| 109 | + support: bool, | |
| 110 | + weight: u64, | |
| 111 | + }, | |
| 112 | +} | |
| 113 | + | |
| 114 | +impl Default for ZephyrCoin { | |
| 115 | + fn default() -> Self { | |
| 116 | + Self { | |
| 117 | + name: "ZephyrCoin".to_string(), | |
| 118 | + symbol: "ZEPH".to_string(), | |
| 119 | + decimals: 18, | |
| 120 | + total_supply: 0, | |
| 121 | + balances: HashMap::new(), | |
| 122 | + allowances: HashMap::new(), | |
| 123 | + contract_owner: String::new(), | |
| 124 | + minters: HashMap::new(), | |
| 125 | + burners: HashMap::new(), | |
| 126 | + paused: false, | |
| 127 | + daily_mint_limit: 1_000_000 * 10_u64.pow(18), // 1M tokens per day max | |
| 128 | + daily_minted: 0, | |
| 129 | + last_mint_reset: Utc::now(), | |
| 130 | + staked_balances: HashMap::new(), | |
| 131 | + governance_proposals: HashMap::new(), | |
| 132 | + proposal_counter: 0, | |
| 133 | + } | |
| 134 | + } | |
| 135 | +} | |
| 136 | + | |
| 137 | +impl ZephyrCoin { | |
| 138 | + /// Initialize new ZephyrCoin contract | |
| 139 | + pub fn new(owner: String, initial_supply: u64) -> Self { | |
| 140 | + let mut coin = Self::default(); | |
| 141 | + coin.contract_owner = owner.clone(); | |
| 142 | + coin.total_supply = initial_supply; | |
| 143 | + coin.balances.insert(owner.clone(), initial_supply); | |
| 144 | + coin.minters.insert(owner, true); | |
| 145 | + coin | |
| 146 | + } | |
| 147 | + | |
| 148 | + /// ERC-20: Get balance of account | |
| 149 | + pub fn balance_of(&self, account: &str) -> u64 { | |
| 150 | + self.balances.get(account).copied().unwrap_or(0) | |
| 151 | + } | |
| 152 | + | |
| 153 | + /// ERC-20: Transfer tokens | |
| 154 | + pub fn transfer(&mut self, from: &str, to: &str, amount: u64) -> Result<TokenEvent> { | |
| 155 | + self.require_not_paused()?; | |
| 156 | + | |
| 157 | + let from_balance = self.balance_of(from); | |
| 158 | + if from_balance < amount { | |
| 159 | + return Err(anyhow::anyhow!("Insufficient balance")); | |
| 160 | + } | |
| 161 | + | |
| 162 | + self.balances.insert(from.to_string(), from_balance - amount); | |
| 163 | + let to_balance = self.balance_of(to); | |
| 164 | + self.balances.insert(to.to_string(), to_balance + amount); | |
| 165 | + | |
| 166 | + Ok(TokenEvent::Transfer { | |
| 167 | + from: from.to_string(), | |
| 168 | + to: to.to_string(), | |
| 169 | + value: amount, | |
| 170 | + }) | |
| 171 | + } | |
| 172 | + | |
| 173 | + /// ERC-20: Approve spender | |
| 174 | + pub fn approve(&mut self, owner: &str, spender: &str, amount: u64) -> Result<TokenEvent> { | |
| 175 | + self.require_not_paused()?; | |
| 176 | + | |
| 177 | + self.allowances | |
| 178 | + .entry(owner.to_string()) | |
| 179 | + .or_insert_with(HashMap::new) | |
| 180 | + .insert(spender.to_string(), amount); | |
| 181 | + | |
| 182 | + Ok(TokenEvent::Approval { | |
| 183 | + owner: owner.to_string(), | |
| 184 | + spender: spender.to_string(), | |
| 185 | + value: amount, | |
| 186 | + }) | |
| 187 | + } | |
| 188 | + | |
| 189 | + /// ERC-20: Transfer from approved amount | |
| 190 | + pub fn transfer_from(&mut self, spender: &str, from: &str, to: &str, amount: u64) -> Result<TokenEvent> { | |
| 191 | + self.require_not_paused()?; | |
| 192 | + | |
| 193 | + // Check allowance | |
| 194 | + let allowance = self.allowances | |
| 195 | + .get(from) | |
| 196 | + .and_then(|allowances| allowances.get(spender)) | |
| 197 | + .copied() | |
| 198 | + .unwrap_or(0); | |
| 199 | + | |
| 200 | + if allowance < amount { | |
| 201 | + return Err(anyhow::anyhow!("Insufficient allowance")); | |
| 202 | + } | |
| 203 | + | |
| 204 | + // Update allowance | |
| 205 | + self.allowances | |
| 206 | + .get_mut(from) | |
| 207 | + .unwrap() | |
| 208 | + .insert(spender.to_string(), allowance - amount); | |
| 209 | + | |
| 210 | + // Perform transfer | |
| 211 | + self.transfer(from, to, amount) | |
| 212 | + } | |
| 213 | + | |
| 214 | + /// ERC-20: Get allowance | |
| 215 | + pub fn allowance(&self, owner: &str, spender: &str) -> u64 { | |
| 216 | + self.allowances | |
| 217 | + .get(owner) | |
| 218 | + .and_then(|allowances| allowances.get(spender)) | |
| 219 | + .copied() | |
| 220 | + .unwrap_or(0) | |
| 221 | + } | |
| 222 | + | |
| 223 | + /// Mint new tokens (ZephyrFS-specific) | |
| 224 | + pub fn mint(&mut self, caller: &str, to: &str, amount: u64) -> Result<TokenEvent> { | |
| 225 | + self.require_not_paused()?; | |
| 226 | + self.require_minter(caller)?; | |
| 227 | + self.check_mint_limits(amount)?; | |
| 228 | + | |
| 229 | + self.total_supply += amount; | |
| 230 | + let balance = self.balance_of(to); | |
| 231 | + self.balances.insert(to.to_string(), balance + amount); | |
| 232 | + | |
| 233 | + // Update daily mint tracking | |
| 234 | + self.daily_minted += amount; | |
| 235 | + | |
| 236 | + Ok(TokenEvent::Mint { | |
| 237 | + to: to.to_string(), | |
| 238 | + value: amount, | |
| 239 | + }) | |
| 240 | + } | |
| 241 | + | |
| 242 | + /// Burn tokens (ZephyrFS-specific) | |
| 243 | + pub fn burn(&mut self, caller: &str, from: &str, amount: u64) -> Result<TokenEvent> { | |
| 244 | + self.require_not_paused()?; | |
| 245 | + self.require_burner(caller)?; | |
| 246 | + | |
| 247 | + let balance = self.balance_of(from); | |
| 248 | + if balance < amount { | |
| 249 | + return Err(anyhow::anyhow!("Insufficient balance to burn")); | |
| 250 | + } | |
| 251 | + | |
| 252 | + self.total_supply -= amount; | |
| 253 | + self.balances.insert(from.to_string(), balance - amount); | |
| 254 | + | |
| 255 | + Ok(TokenEvent::Burn { | |
| 256 | + from: from.to_string(), | |
| 257 | + value: amount, | |
| 258 | + }) | |
| 259 | + } | |
| 260 | + | |
| 261 | + /// Stake tokens for governance and rewards | |
| 262 | + pub fn stake(&mut self, user: &str, amount: u64, duration_days: u32) -> Result<TokenEvent> { | |
| 263 | + self.require_not_paused()?; | |
| 264 | + | |
| 265 | + let balance = self.balance_of(user); | |
| 266 | + if balance < amount { | |
| 267 | + return Err(anyhow::anyhow!("Insufficient balance to stake")); | |
| 268 | + } | |
| 269 | + | |
| 270 | + // Lock tokens | |
| 271 | + self.balances.insert(user.to_string(), balance - amount); | |
| 272 | + | |
| 273 | + // Add to staking | |
| 274 | + let unlock_time = Utc::now() + chrono::Duration::days(duration_days as i64); | |
| 275 | + let staked = StakedBalance { | |
| 276 | + amount, | |
| 277 | + staked_at: Utc::now(), | |
| 278 | + unlock_time, | |
| 279 | + rewards_claimed: 0, | |
| 280 | + }; | |
| 281 | + | |
| 282 | + self.staked_balances.insert(user.to_string(), staked); | |
| 283 | + | |
| 284 | + Ok(TokenEvent::Stake { | |
| 285 | + user: user.to_string(), | |
| 286 | + amount, | |
| 287 | + duration_days, | |
| 288 | + }) | |
| 289 | + } | |
| 290 | + | |
| 291 | + /// Unstake tokens after lock period | |
| 292 | + pub fn unstake(&mut self, user: &str) -> Result<TokenEvent> { | |
| 293 | + self.require_not_paused()?; | |
| 294 | + | |
| 295 | + let staked = self.staked_balances.get(user) | |
| 296 | + .ok_or_else(|| anyhow::anyhow!("No staked balance found"))?; | |
| 297 | + | |
| 298 | + if Utc::now() < staked.unlock_time { | |
| 299 | + return Err(anyhow::anyhow!("Tokens still locked")); | |
| 300 | + } | |
| 301 | + | |
| 302 | + // Calculate staking rewards (5% APY) | |
| 303 | + let staking_duration = (Utc::now() - staked.staked_at).num_days(); | |
| 304 | + let rewards = (staked.amount as f64 * 0.05 * staking_duration as f64 / 365.0) as u64; | |
| 305 | + | |
| 306 | + // Return staked amount + rewards | |
| 307 | + let balance = self.balance_of(user); | |
| 308 | + self.balances.insert(user.to_string(), balance + staked.amount + rewards); | |
| 309 | + | |
| 310 | + // Mint rewards | |
| 311 | + self.total_supply += rewards; | |
| 312 | + | |
| 313 | + // Remove from staking | |
| 314 | + self.staked_balances.remove(user); | |
| 315 | + | |
| 316 | + Ok(TokenEvent::Unstake { | |
| 317 | + user: user.to_string(), | |
| 318 | + amount: staked.amount, | |
| 319 | + rewards, | |
| 320 | + }) | |
| 321 | + } | |
| 322 | + | |
| 323 | + /// Create governance proposal | |
| 324 | + pub fn create_proposal( | |
| 325 | + &mut self, | |
| 326 | + proposer: &str, | |
| 327 | + title: String, | |
| 328 | + description: String, | |
| 329 | + target_contract: Option<String>, | |
| 330 | + call_data: Option<Vec<u8>>, | |
| 331 | + voting_duration_days: u32, | |
| 332 | + ) -> Result<TokenEvent> { | |
| 333 | + self.require_not_paused()?; | |
| 334 | + | |
| 335 | + // Require minimum stake to propose (10,000 ZEPH) | |
| 336 | + let min_stake = 10_000 * 10_u64.pow(18); | |
| 337 | + let staked = self.staked_balances.get(proposer) | |
| 338 | + .ok_or_else(|| anyhow::anyhow!("Must stake tokens to propose"))?; | |
| 339 | + | |
| 340 | + if staked.amount < min_stake { | |
| 341 | + return Err(anyhow::anyhow!("Insufficient stake to create proposal")); | |
| 342 | + } | |
| 343 | + | |
| 344 | + self.proposal_counter += 1; | |
| 345 | + let proposal = GovernanceProposal { | |
| 346 | + id: self.proposal_counter, | |
| 347 | + proposer: proposer.to_string(), | |
| 348 | + title: title.clone(), | |
| 349 | + description, | |
| 350 | + target_contract, | |
| 351 | + call_data, | |
| 352 | + created_at: Utc::now(), | |
| 353 | + voting_ends_at: Utc::now() + chrono::Duration::days(voting_duration_days as i64), | |
| 354 | + votes_for: 0, | |
| 355 | + votes_against: 0, | |
| 356 | + executed: false, | |
| 357 | + voters: HashMap::new(), | |
| 358 | + }; | |
| 359 | + | |
| 360 | + self.governance_proposals.insert(self.proposal_counter, proposal); | |
| 361 | + | |
| 362 | + Ok(TokenEvent::ProposalCreated { | |
| 363 | + id: self.proposal_counter, | |
| 364 | + proposer: proposer.to_string(), | |
| 365 | + title, | |
| 366 | + }) | |
| 367 | + } | |
| 368 | + | |
| 369 | + /// Vote on governance proposal | |
| 370 | + pub fn vote(&mut self, voter: &str, proposal_id: u64, support: bool) -> Result<TokenEvent> { | |
| 371 | + self.require_not_paused()?; | |
| 372 | + | |
| 373 | + let proposal = self.governance_proposals.get_mut(&proposal_id) | |
| 374 | + .ok_or_else(|| anyhow::anyhow!("Proposal not found"))?; | |
| 375 | + | |
| 376 | + if Utc::now() > proposal.voting_ends_at { | |
| 377 | + return Err(anyhow::anyhow!("Voting period ended")); | |
| 378 | + } | |
| 379 | + | |
| 380 | + if proposal.voters.contains_key(voter) { | |
| 381 | + return Err(anyhow::anyhow!("Already voted")); | |
| 382 | + } | |
| 383 | + | |
| 384 | + // Voting weight = staked tokens | |
| 385 | + let staked = self.staked_balances.get(voter) | |
| 386 | + .ok_or_else(|| anyhow::anyhow!("Must stake tokens to vote"))?; | |
| 387 | + | |
| 388 | + let weight = staked.amount; | |
| 389 | + | |
| 390 | + if support { | |
| 391 | + proposal.votes_for += weight; | |
| 392 | + proposal.voters.insert(voter.to_string(), Vote::For(weight)); | |
| 393 | + } else { | |
| 394 | + proposal.votes_against += weight; | |
| 395 | + proposal.voters.insert(voter.to_string(), Vote::Against(weight)); | |
| 396 | + } | |
| 397 | + | |
| 398 | + Ok(TokenEvent::VoteCast { | |
| 399 | + proposal_id, | |
| 400 | + voter: voter.to_string(), | |
| 401 | + support, | |
| 402 | + weight, | |
| 403 | + }) | |
| 404 | + } | |
| 405 | + | |
| 406 | + /// Add minter role | |
| 407 | + pub fn add_minter(&mut self, caller: &str, minter: &str) -> Result<()> { | |
| 408 | + self.require_owner(caller)?; | |
| 409 | + self.minters.insert(minter.to_string(), true); | |
| 410 | + Ok(()) | |
| 411 | + } | |
| 412 | + | |
| 413 | + /// Add burner role | |
| 414 | + pub fn add_burner(&mut self, caller: &str, burner: &str) -> Result<()> { | |
| 415 | + self.require_owner(caller)?; | |
| 416 | + self.burners.insert(burner.to_string(), true); | |
| 417 | + Ok(()) | |
| 418 | + } | |
| 419 | + | |
| 420 | + /// Pause contract | |
| 421 | + pub fn pause(&mut self, caller: &str) -> Result<()> { | |
| 422 | + self.require_owner(caller)?; | |
| 423 | + self.paused = true; | |
| 424 | + Ok(()) | |
| 425 | + } | |
| 426 | + | |
| 427 | + /// Unpause contract | |
| 428 | + pub fn unpause(&mut self, caller: &str) -> Result<()> { | |
| 429 | + self.require_owner(caller)?; | |
| 430 | + self.paused = false; | |
| 431 | + Ok(()) | |
| 432 | + } | |
| 433 | + | |
| 434 | + /// Get staked balance | |
| 435 | + pub fn get_staked_balance(&self, user: &str) -> Option<&StakedBalance> { | |
| 436 | + self.staked_balances.get(user) | |
| 437 | + } | |
| 438 | + | |
| 439 | + /// Get proposal | |
| 440 | + pub fn get_proposal(&self, proposal_id: u64) -> Option<&GovernanceProposal> { | |
| 441 | + self.governance_proposals.get(&proposal_id) | |
| 442 | + } | |
| 443 | + | |
| 444 | + /// Check and reset daily mint limits | |
| 445 | + fn check_mint_limits(&mut self, amount: u64) -> Result<()> { | |
| 446 | + let now = Utc::now(); | |
| 447 | + | |
| 448 | + // Reset daily counter if new day | |
| 449 | + if (now - self.last_mint_reset).num_days() >= 1 { | |
| 450 | + self.daily_minted = 0; | |
| 451 | + self.last_mint_reset = now; | |
| 452 | + } | |
| 453 | + | |
| 454 | + if self.daily_minted + amount > self.daily_mint_limit { | |
| 455 | + return Err(anyhow::anyhow!("Daily mint limit exceeded")); | |
| 456 | + } | |
| 457 | + | |
| 458 | + Ok(()) | |
| 459 | + } | |
| 460 | + | |
| 461 | + fn require_owner(&self, caller: &str) -> Result<()> { | |
| 462 | + if caller != self.contract_owner { | |
| 463 | + return Err(anyhow::anyhow!("Only owner can call this function")); | |
| 464 | + } | |
| 465 | + Ok(()) | |
| 466 | + } | |
| 467 | + | |
| 468 | + fn require_minter(&self, caller: &str) -> Result<()> { | |
| 469 | + if !self.minters.get(caller).unwrap_or(&false) { | |
| 470 | + return Err(anyhow::anyhow!("Only minters can call this function")); | |
| 471 | + } | |
| 472 | + Ok(()) | |
| 473 | + } | |
| 474 | + | |
| 475 | + fn require_burner(&self, caller: &str) -> Result<()> { | |
| 476 | + if !self.burners.get(caller).unwrap_or(&false) { | |
| 477 | + return Err(anyhow::anyhow!("Only burners can call this function")); | |
| 478 | + } | |
| 479 | + Ok(()) | |
| 480 | + } | |
| 481 | + | |
| 482 | + fn require_not_paused(&self) -> Result<()> { | |
| 483 | + if self.paused { | |
| 484 | + return Err(anyhow::anyhow!("Contract is paused")); | |
| 485 | + } | |
| 486 | + Ok(()) | |
| 487 | + } | |
| 488 | +} | |
| 489 | + | |
| 490 | +#[cfg(test)] | |
| 491 | +mod tests { | |
| 492 | + use super::*; | |
| 493 | + | |
| 494 | + #[test] | |
| 495 | + fn test_zephyr_coin_basic() { | |
| 496 | + let owner = "owner".to_string(); | |
| 497 | + let initial_supply = 1000 * 10_u64.pow(18); | |
| 498 | + let mut coin = ZephyrCoin::new(owner.clone(), initial_supply); | |
| 499 | + | |
| 500 | + assert_eq!(coin.balance_of(&owner), initial_supply); | |
| 501 | + assert_eq!(coin.total_supply, initial_supply); | |
| 502 | + } | |
| 503 | + | |
| 504 | + #[test] | |
| 505 | + fn test_transfer() { | |
| 506 | + let owner = "owner".to_string(); | |
| 507 | + let recipient = "recipient".to_string(); | |
| 508 | + let mut coin = ZephyrCoin::new(owner.clone(), 1000); | |
| 509 | + | |
| 510 | + let event = coin.transfer(&owner, &recipient, 100).unwrap(); | |
| 511 | + | |
| 512 | + assert_eq!(coin.balance_of(&owner), 900); | |
| 513 | + assert_eq!(coin.balance_of(&recipient), 100); | |
| 514 | + | |
| 515 | + match event { | |
| 516 | + TokenEvent::Transfer { from, to, value } => { | |
| 517 | + assert_eq!(from, owner); | |
| 518 | + assert_eq!(to, recipient); | |
| 519 | + assert_eq!(value, 100); | |
| 520 | + } | |
| 521 | + _ => panic!("Expected Transfer event"), | |
| 522 | + } | |
| 523 | + } | |
| 524 | + | |
| 525 | + #[test] | |
| 526 | + fn test_staking() { | |
| 527 | + let owner = "owner".to_string(); | |
| 528 | + let mut coin = ZephyrCoin::new(owner.clone(), 1000); | |
| 529 | + | |
| 530 | + coin.stake(&owner, 500, 30).unwrap(); | |
| 531 | + | |
| 532 | + assert_eq!(coin.balance_of(&owner), 500); | |
| 533 | + let staked = coin.get_staked_balance(&owner).unwrap(); | |
| 534 | + assert_eq!(staked.amount, 500); | |
| 535 | + } | |
| 536 | + | |
| 537 | + #[test] | |
| 538 | + fn test_governance() { | |
| 539 | + let owner = "owner".to_string(); | |
| 540 | + let mut coin = ZephyrCoin::new(owner.clone(), 100_000 * 10_u64.pow(18)); | |
| 541 | + | |
| 542 | + // Stake tokens for governance | |
| 543 | + coin.stake(&owner, 50_000 * 10_u64.pow(18), 365).unwrap(); | |
| 544 | + | |
| 545 | + // Create proposal | |
| 546 | + coin.create_proposal( | |
| 547 | + &owner, | |
| 548 | + "Test Proposal".to_string(), | |
| 549 | + "A test governance proposal".to_string(), | |
| 550 | + None, | |
| 551 | + None, | |
| 552 | + 7, | |
| 553 | + ).unwrap(); | |
| 554 | + | |
| 555 | + // Vote on proposal | |
| 556 | + coin.vote(&owner, 1, true).unwrap(); | |
| 557 | + | |
| 558 | + let proposal = coin.get_proposal(1).unwrap(); | |
| 559 | + assert_eq!(proposal.votes_for, 50_000 * 10_u64.pow(18)); | |
| 560 | + } | |
| 561 | +} | |
src/lib.rsmodified@@ -18,6 +18,10 @@ pub mod verification; | ||
| 18 | 18 | pub mod audit; |
| 19 | 19 | pub mod proof; |
| 20 | 20 | |
| 21 | +// Phase 5.1: Economic Foundation & Token System | |
| 22 | +pub mod economics; | |
| 23 | +pub mod allocation; | |
| 24 | + | |
| 21 | 25 | pub use crypto::{ |
| 22 | 26 | ZephyrCrypto, CryptoParams, ScryptParams, AesParams, HashParams, |
| 23 | 27 | ContentHasher, VerificationHasher, EncryptedData, ContentId, HashAlgorithm |
@@ -45,3 +49,12 @@ pub use proof::{ | ||
| 45 | 49 | UnifiedProofManager, UnifiedProofConfig, ComprehensiveChallenge, |
| 46 | 50 | ComprehensiveVerificationResult as ProofVerificationResult, ProofStatistics |
| 47 | 51 | }; |
| 52 | + | |
| 53 | +// Phase 5.1: Economic system exports | |
| 54 | +pub use economics::{ | |
| 55 | + TokenEconomicsManager, ZephyrCoin, NetworkHealthController, ZephyrCoinAMM, | |
| 56 | + EarningsCalculator, PaymentProcessor, PayoutScheduler, PerformanceRewardsSystem | |
| 57 | +}; | |
| 58 | +pub use allocation::{ | |
| 59 | + DemocraticAllocationManager, AllocationStrategy, AllocationQuality | |
| 60 | +}; | |