@@ -0,0 +1,516 @@ |
| | 1 | +package coordinator |
| | 2 | + |
| | 3 | +import ( |
| | 4 | + "context" |
| | 5 | + "fmt" |
| | 6 | + "testing" |
| | 7 | + "time" |
| | 8 | + |
| | 9 | + "github.com/ZephyrFS/zephyrfs-coordinator/internal/config" |
| | 10 | + "github.com/ZephyrFS/zephyrfs-coordinator/internal/database" |
| | 11 | + "github.com/ZephyrFS/zephyrfs-coordinator/internal/models" |
| | 12 | +) |
| | 13 | + |
// mockDatabase implements the Database interface for testing.
// It keeps everything in a nested in-memory map so coordinator tests can
// run without a real persistent store.
type mockDatabase struct {
	data map[string]map[string][]byte // bucket name -> key -> stored value
}
| | 18 | + |
| | 19 | +func newMockDatabase() *mockDatabase { |
| | 20 | + return &mockDatabase{ |
| | 21 | + data: make(map[string]map[string][]byte), |
| | 22 | + } |
| | 23 | +} |
| | 24 | + |
| | 25 | +func (m *mockDatabase) Set(bucket, key string, value []byte) error { |
| | 26 | + if m.data[bucket] == nil { |
| | 27 | + m.data[bucket] = make(map[string][]byte) |
| | 28 | + } |
| | 29 | + m.data[bucket][key] = value |
| | 30 | + return nil |
| | 31 | +} |
| | 32 | + |
| | 33 | +func (m *mockDatabase) Get(bucket, key string) ([]byte, error) { |
| | 34 | + if bucketData, exists := m.data[bucket]; exists { |
| | 35 | + if value, exists := bucketData[key]; exists { |
| | 36 | + return value, nil |
| | 37 | + } |
| | 38 | + } |
| | 39 | + return nil, database.ErrNotFound |
| | 40 | +} |
| | 41 | + |
| | 42 | +func (m *mockDatabase) Delete(bucket, key string) error { |
| | 43 | + if bucketData, exists := m.data[bucket]; exists { |
| | 44 | + delete(bucketData, key) |
| | 45 | + } |
| | 46 | + return nil |
| | 47 | +} |
| | 48 | + |
| | 49 | +func (m *mockDatabase) GetAll(bucket string) (map[string][]byte, error) { |
| | 50 | + if bucketData, exists := m.data[bucket]; exists { |
| | 51 | + result := make(map[string][]byte) |
| | 52 | + for k, v := range bucketData { |
| | 53 | + result[k] = v |
| | 54 | + } |
| | 55 | + return result, nil |
| | 56 | + } |
| | 57 | + return make(map[string][]byte), nil |
| | 58 | +} |
| | 59 | + |
| | 60 | +func (m *mockDatabase) CreateBucket(bucket string) error { |
| | 61 | + if m.data[bucket] == nil { |
| | 62 | + m.data[bucket] = make(map[string][]byte) |
| | 63 | + } |
| | 64 | + return nil |
| | 65 | +} |
| | 66 | + |
| | 67 | +func (m *mockDatabase) ListBuckets() ([]string, error) { |
| | 68 | + var buckets []string |
| | 69 | + for bucket := range m.data { |
| | 70 | + buckets = append(buckets, bucket) |
| | 71 | + } |
| | 72 | + return buckets, nil |
| | 73 | +} |
| | 74 | + |
// Close satisfies the Database interface; the in-memory mock holds no
// resources to release.
func (m *mockDatabase) Close() error {
	return nil
}
| | 78 | + |
// Stats satisfies the Database interface by returning zero-valued stats;
// the mock does not track any metrics.
func (m *mockDatabase) Stats() (*database.Stats, error) {
	return &database.Stats{}, nil
}
| | 82 | + |
// ErrNotFound is a sentinel error for missing entries.
//
// NOTE(review): nothing in this file references it — the mock returns
// database.ErrNotFound directly — so it appears to be dead code; kept for
// compatibility. errors.New is the idiomatic sentinel constructor
// (fmt.Errorf adds nothing when there are no format verbs).
var ErrNotFound = errors.New("not found")
| | 85 | + |
| | 86 | +// Test helper to create a coordinator for testing |
| | 87 | +func createTestCoordinator(t *testing.T) *Coordinator { |
| | 88 | + mockDB := newMockDatabase() |
| | 89 | + cfg := config.CoordinatorConfig{ |
| | 90 | + NodeTimeout: 30 * time.Second, |
| | 91 | + HeartbeatInterval: 10 * time.Second, |
| | 92 | + ReplicationFactor: 3, |
| | 93 | + MaxNodesPerChunk: 10, |
| | 94 | + CleanupInterval: 5 * time.Minute, |
| | 95 | + NodeInactiveAfter: 60 * time.Second, |
| | 96 | + GeographicSpread: true, |
| | 97 | + } |
| | 98 | + |
| | 99 | + coord := New(mockDB, cfg) |
| | 100 | + return coord |
| | 101 | +} |
| | 102 | + |
| | 103 | +func TestCoordinator_RegisterNode(t *testing.T) { |
| | 104 | + coord := createTestCoordinator(t) |
| | 105 | + defer coord.Shutdown(context.Background()) |
| | 106 | + |
| | 107 | + req := &models.RegisterNodeRequest{ |
| | 108 | + NodeID: "", // Should be auto-generated |
| | 109 | + Addresses: []string{"127.0.0.1:8080"}, |
| | 110 | + StorageCapacity: 1000000000, // 1GB |
| | 111 | + Capabilities: map[string]string{"version": "1.0.0"}, |
| | 112 | + } |
| | 113 | + |
| | 114 | + resp, err := coord.RegisterNode(context.Background(), req) |
| | 115 | + if err != nil { |
| | 116 | + t.Fatalf("RegisterNode failed: %v", err) |
| | 117 | + } |
| | 118 | + |
| | 119 | + if !resp.Success { |
| | 120 | + t.Errorf("Expected success=true, got %v", resp.Success) |
| | 121 | + } |
| | 122 | + |
| | 123 | + if resp.AssignedNodeID == "" { |
| | 124 | + t.Errorf("Expected assigned node ID to be non-empty") |
| | 125 | + } |
| | 126 | + |
| | 127 | + if len(resp.BootstrapPeers) != 0 { |
| | 128 | + t.Errorf("Expected 0 bootstrap peers for first node, got %d", len(resp.BootstrapPeers)) |
| | 129 | + } |
| | 130 | + |
| | 131 | + // Verify node was stored |
| | 132 | + coord.nodesMux.RLock() |
| | 133 | + node, exists := coord.nodes[resp.AssignedNodeID] |
| | 134 | + coord.nodesMux.RUnlock() |
| | 135 | + |
| | 136 | + if !exists { |
| | 137 | + t.Errorf("Node was not stored in coordinator") |
| | 138 | + } |
| | 139 | + |
| | 140 | + if node.StorageCapacity != req.StorageCapacity { |
| | 141 | + t.Errorf("Expected storage capacity %d, got %d", req.StorageCapacity, node.StorageCapacity) |
| | 142 | + } |
| | 143 | +} |
| | 144 | + |
| | 145 | +func TestCoordinator_RegisterNodeWithExistingNodes(t *testing.T) { |
| | 146 | + coord := createTestCoordinator(t) |
| | 147 | + defer coord.Shutdown(context.Background()) |
| | 148 | + |
| | 149 | + // Register first node |
| | 150 | + req1 := &models.RegisterNodeRequest{ |
| | 151 | + Addresses: []string{"127.0.0.1:8080"}, |
| | 152 | + StorageCapacity: 1000000000, |
| | 153 | + } |
| | 154 | + resp1, err := coord.RegisterNode(context.Background(), req1) |
| | 155 | + if err != nil { |
| | 156 | + t.Fatalf("First RegisterNode failed: %v", err) |
| | 157 | + } |
| | 158 | + |
| | 159 | + // Register second node |
| | 160 | + req2 := &models.RegisterNodeRequest{ |
| | 161 | + Addresses: []string{"127.0.0.1:8081"}, |
| | 162 | + StorageCapacity: 2000000000, |
| | 163 | + } |
| | 164 | + resp2, err := coord.RegisterNode(context.Background(), req2) |
| | 165 | + if err != nil { |
| | 166 | + t.Fatalf("Second RegisterNode failed: %v", err) |
| | 167 | + } |
| | 168 | + |
| | 169 | + if len(resp2.BootstrapPeers) == 0 { |
| | 170 | + t.Errorf("Expected bootstrap peers for second node, got none") |
| | 171 | + } |
| | 172 | + |
| | 173 | + // Bootstrap peers should include first node's address |
| | 174 | + found := false |
| | 175 | + for _, peer := range resp2.BootstrapPeers { |
| | 176 | + if peer == req1.Addresses[0] { |
| | 177 | + found = true |
| | 178 | + break |
| | 179 | + } |
| | 180 | + } |
| | 181 | + if !found { |
| | 182 | + t.Errorf("Bootstrap peers should include first node's address") |
| | 183 | + } |
| | 184 | +} |
| | 185 | + |
| | 186 | +func TestCoordinator_NodeHeartbeat(t *testing.T) { |
| | 187 | + coord := createTestCoordinator(t) |
| | 188 | + defer coord.Shutdown(context.Background()) |
| | 189 | + |
| | 190 | + // Register a node first |
| | 191 | + registerReq := &models.RegisterNodeRequest{ |
| | 192 | + Addresses: []string{"127.0.0.1:8080"}, |
| | 193 | + StorageCapacity: 1000000000, |
| | 194 | + } |
| | 195 | + registerResp, err := coord.RegisterNode(context.Background(), registerReq) |
| | 196 | + if err != nil { |
| | 197 | + t.Fatalf("RegisterNode failed: %v", err) |
| | 198 | + } |
| | 199 | + |
| | 200 | + nodeID := registerResp.AssignedNodeID |
| | 201 | + |
| | 202 | + // Send heartbeat |
| | 203 | + heartbeatReq := &models.NodeHeartbeatRequest{ |
| | 204 | + NodeID: nodeID, |
| | 205 | + Stats: &models.NodeStats{ |
| | 206 | + StorageUsed: 500000000, |
| | 207 | + StorageAvailable: 500000000, |
| | 208 | + ChunksStored: 100, |
| | 209 | + CpuUsage: 25.5, |
| | 210 | + MemoryUsage: 60.0, |
| | 211 | + UptimeSeconds: 3600, |
| | 212 | + }, |
| | 213 | + } |
| | 214 | + |
| | 215 | + heartbeatResp, err := coord.NodeHeartbeat(context.Background(), heartbeatReq) |
| | 216 | + if err != nil { |
| | 217 | + t.Fatalf("NodeHeartbeat failed: %v", err) |
| | 218 | + } |
| | 219 | + |
| | 220 | + if !heartbeatResp.Success { |
| | 221 | + t.Errorf("Expected heartbeat success=true, got %v", heartbeatResp.Success) |
| | 222 | + } |
| | 223 | + |
| | 224 | + // Verify stats were updated |
| | 225 | + coord.nodesMux.RLock() |
| | 226 | + node, exists := coord.nodes[nodeID] |
| | 227 | + coord.nodesMux.RUnlock() |
| | 228 | + |
| | 229 | + if !exists { |
| | 230 | + t.Fatalf("Node not found after heartbeat") |
| | 231 | + } |
| | 232 | + |
| | 233 | + if node.Stats.StorageUsed != heartbeatReq.Stats.StorageUsed { |
| | 234 | + t.Errorf("Expected storage used %d, got %d", heartbeatReq.Stats.StorageUsed, node.Stats.StorageUsed) |
| | 235 | + } |
| | 236 | + |
| | 237 | + if node.Status != "active" { |
| | 238 | + t.Errorf("Expected node status to be 'active', got '%s'", node.Status) |
| | 239 | + } |
| | 240 | +} |
| | 241 | + |
| | 242 | +func TestCoordinator_RegisterFile(t *testing.T) { |
| | 243 | + coord := createTestCoordinator(t) |
| | 244 | + defer coord.Shutdown(context.Background()) |
| | 245 | + |
| | 246 | + // Register some nodes first |
| | 247 | + for i := 0; i < 5; i++ { |
| | 248 | + registerReq := &models.RegisterNodeRequest{ |
| | 249 | + Addresses: []string{fmt.Sprintf("127.0.0.1:808%d", i)}, |
| | 250 | + StorageCapacity: 1000000000, |
| | 251 | + } |
| | 252 | + _, err := coord.RegisterNode(context.Background(), registerReq) |
| | 253 | + if err != nil { |
| | 254 | + t.Fatalf("RegisterNode %d failed: %v", i, err) |
| | 255 | + } |
| | 256 | + } |
| | 257 | + |
| | 258 | + // Register a file |
| | 259 | + fileReq := &models.RegisterFileRequest{ |
| | 260 | + FileID: "test-file-123", |
| | 261 | + FileName: "test.txt", |
| | 262 | + FileSize: 1048576, // 1MB |
| | 263 | + FileHash: "abcd1234", |
| | 264 | + Chunks: []*models.ChunkMetadata{ |
| | 265 | + { |
| | 266 | + ChunkID: "chunk-1", |
| | 267 | + Hash: "hash1", |
| | 268 | + Size: 524288, // 512KB |
| | 269 | + Index: 0, |
| | 270 | + }, |
| | 271 | + { |
| | 272 | + ChunkID: "chunk-2", |
| | 273 | + Hash: "hash2", |
| | 274 | + Size: 524288, // 512KB |
| | 275 | + Index: 1, |
| | 276 | + }, |
| | 277 | + }, |
| | 278 | + OwnerNodeID: "owner-node-123", |
| | 279 | + } |
| | 280 | + |
| | 281 | + fileResp, err := coord.RegisterFile(context.Background(), fileReq) |
| | 282 | + if err != nil { |
| | 283 | + t.Fatalf("RegisterFile failed: %v", err) |
| | 284 | + } |
| | 285 | + |
| | 286 | + if !fileResp.Success { |
| | 287 | + t.Errorf("Expected file registration success=true, got %v", fileResp.Success) |
| | 288 | + } |
| | 289 | + |
| | 290 | + if len(fileResp.ChunkPlacements) != len(fileReq.Chunks) { |
| | 291 | + t.Errorf("Expected %d chunk placements, got %d", len(fileReq.Chunks), len(fileResp.ChunkPlacements)) |
| | 292 | + } |
| | 293 | + |
| | 294 | + // Verify each chunk has appropriate replication |
| | 295 | + for _, placement := range fileResp.ChunkPlacements { |
| | 296 | + if len(placement.TargetNodes) < coord.config.ReplicationFactor { |
| | 297 | + t.Errorf("Chunk %s has insufficient replication: %d < %d", |
| | 298 | + placement.ChunkID, len(placement.TargetNodes), coord.config.ReplicationFactor) |
| | 299 | + } |
| | 300 | + } |
| | 301 | + |
| | 302 | + // Verify file was stored |
| | 303 | + coord.filesMux.RLock() |
| | 304 | + file, exists := coord.files[fileReq.FileID] |
| | 305 | + coord.filesMux.RUnlock() |
| | 306 | + |
| | 307 | + if !exists { |
| | 308 | + t.Errorf("File was not stored in coordinator") |
| | 309 | + } |
| | 310 | + |
| | 311 | + if file.FileName != fileReq.FileName { |
| | 312 | + t.Errorf("Expected file name '%s', got '%s'", fileReq.FileName, file.FileName) |
| | 313 | + } |
| | 314 | +} |
| | 315 | + |
| | 316 | +func TestCoordinator_FindChunkLocations(t *testing.T) { |
| | 317 | + coord := createTestCoordinator(t) |
| | 318 | + defer coord.Shutdown(context.Background()) |
| | 319 | + |
| | 320 | + // Register nodes and a file first |
| | 321 | + nodeIDs := make([]string, 3) |
| | 322 | + for i := 0; i < 3; i++ { |
| | 323 | + registerReq := &models.RegisterNodeRequest{ |
| | 324 | + Addresses: []string{fmt.Sprintf("127.0.0.1:808%d", i)}, |
| | 325 | + StorageCapacity: 1000000000, |
| | 326 | + } |
| | 327 | + resp, err := coord.RegisterNode(context.Background(), registerReq) |
| | 328 | + if err != nil { |
| | 329 | + t.Fatalf("RegisterNode %d failed: %v", i, err) |
| | 330 | + } |
| | 331 | + nodeIDs[i] = resp.AssignedNodeID |
| | 332 | + } |
| | 333 | + |
| | 334 | + // Register a file |
| | 335 | + fileReq := &models.RegisterFileRequest{ |
| | 336 | + FileID: "test-file-123", |
| | 337 | + FileName: "test.txt", |
| | 338 | + FileSize: 524288, |
| | 339 | + FileHash: "abcd1234", |
| | 340 | + Chunks: []*models.ChunkMetadata{ |
| | 341 | + { |
| | 342 | + ChunkID: "chunk-1", |
| | 343 | + Hash: "hash1", |
| | 344 | + Size: 524288, |
| | 345 | + Index: 0, |
| | 346 | + }, |
| | 347 | + }, |
| | 348 | + OwnerNodeID: nodeIDs[0], |
| | 349 | + } |
| | 350 | + |
| | 351 | + _, err := coord.RegisterFile(context.Background(), fileReq) |
| | 352 | + if err != nil { |
| | 353 | + t.Fatalf("RegisterFile failed: %v", err) |
| | 354 | + } |
| | 355 | + |
| | 356 | + // Find chunk locations |
| | 357 | + findReq := &models.FindChunkLocationsRequest{ |
| | 358 | + ChunkID: "chunk-1", |
| | 359 | + PreferredCount: 2, |
| | 360 | + } |
| | 361 | + |
| | 362 | + findResp, err := coord.FindChunkLocations(context.Background(), findReq) |
| | 363 | + if err != nil { |
| | 364 | + t.Fatalf("FindChunkLocations failed: %v", err) |
| | 365 | + } |
| | 366 | + |
| | 367 | + if !findResp.Success { |
| | 368 | + t.Errorf("Expected find success=true, got %v", findResp.Success) |
| | 369 | + } |
| | 370 | + |
| | 371 | + if len(findResp.NodeIDs) == 0 { |
| | 372 | + t.Errorf("Expected to find chunk locations, got none") |
| | 373 | + } |
| | 374 | + |
| | 375 | + // Should respect preferred count |
| | 376 | + if len(findResp.NodeIDs) > int(findReq.PreferredCount) { |
| | 377 | + t.Errorf("Expected at most %d locations, got %d", findReq.PreferredCount, len(findResp.NodeIDs)) |
| | 378 | + } |
| | 379 | + |
| | 380 | + // Should have corresponding addresses |
| | 381 | + if len(findResp.NodeAddresses) != len(findResp.NodeIDs) { |
| | 382 | + t.Errorf("Mismatch between node IDs (%d) and addresses (%d)", |
| | 383 | + len(findResp.NodeIDs), len(findResp.NodeAddresses)) |
| | 384 | + } |
| | 385 | +} |
| | 386 | + |
| | 387 | +func TestCoordinator_GetActiveNodes(t *testing.T) { |
| | 388 | + coord := createTestCoordinator(t) |
| | 389 | + defer coord.Shutdown(context.Background()) |
| | 390 | + |
| | 391 | + // Register some nodes |
| | 392 | + nodeIDs := make([]string, 5) |
| | 393 | + for i := 0; i < 5; i++ { |
| | 394 | + registerReq := &models.RegisterNodeRequest{ |
| | 395 | + Addresses: []string{fmt.Sprintf("127.0.0.1:808%d", i)}, |
| | 396 | + StorageCapacity: 1000000000, |
| | 397 | + } |
| | 398 | + resp, err := coord.RegisterNode(context.Background(), registerReq) |
| | 399 | + if err != nil { |
| | 400 | + t.Fatalf("RegisterNode %d failed: %v", i, err) |
| | 401 | + } |
| | 402 | + nodeIDs[i] = resp.AssignedNodeID |
| | 403 | + } |
| | 404 | + |
| | 405 | + // Get active nodes |
| | 406 | + getReq := &models.GetActiveNodesRequest{ |
| | 407 | + Limit: 3, |
| | 408 | + ExcludeNodes: []string{nodeIDs[0]}, // Exclude first node |
| | 409 | + } |
| | 410 | + |
| | 411 | + getResp, err := coord.GetActiveNodes(context.Background(), getReq) |
| | 412 | + if err != nil { |
| | 413 | + t.Fatalf("GetActiveNodes failed: %v", err) |
| | 414 | + } |
| | 415 | + |
| | 416 | + if len(getResp.Nodes) > int(getReq.Limit) { |
| | 417 | + t.Errorf("Expected at most %d nodes, got %d", getReq.Limit, len(getResp.Nodes)) |
| | 418 | + } |
| | 419 | + |
| | 420 | + // Should not include excluded node |
| | 421 | + for _, node := range getResp.Nodes { |
| | 422 | + if node.NodeID == nodeIDs[0] { |
| | 423 | + t.Errorf("Excluded node %s was included in results", nodeIDs[0]) |
| | 424 | + } |
| | 425 | + } |
| | 426 | + |
| | 427 | + if getResp.TotalNodes == 0 { |
| | 428 | + t.Errorf("Expected total nodes > 0, got %d", getResp.TotalNodes) |
| | 429 | + } |
| | 430 | +} |
| | 431 | + |
| | 432 | +func TestCoordinator_GetNetworkStatus(t *testing.T) { |
| | 433 | + coord := createTestCoordinator(t) |
| | 434 | + defer coord.Shutdown(context.Background()) |
| | 435 | + |
| | 436 | + // Register some nodes |
| | 437 | + for i := 0; i < 3; i++ { |
| | 438 | + registerReq := &models.RegisterNodeRequest{ |
| | 439 | + Addresses: []string{fmt.Sprintf("127.0.0.1:808%d", i)}, |
| | 440 | + StorageCapacity: 1000000000, |
| | 441 | + } |
| | 442 | + _, err := coord.RegisterNode(context.Background(), registerReq) |
| | 443 | + if err != nil { |
| | 444 | + t.Fatalf("RegisterNode %d failed: %v", i, err) |
| | 445 | + } |
| | 446 | + } |
| | 447 | + |
| | 448 | + statusResp, err := coord.GetNetworkStatus(context.Background()) |
| | 449 | + if err != nil { |
| | 450 | + t.Fatalf("GetNetworkStatus failed: %v", err) |
| | 451 | + } |
| | 452 | + |
| | 453 | + if statusResp.NetworkStats.TotalNodes != 3 { |
| | 454 | + t.Errorf("Expected 3 total nodes, got %d", statusResp.NetworkStats.TotalNodes) |
| | 455 | + } |
| | 456 | + |
| | 457 | + if statusResp.NetworkStats.ActiveNodes != 3 { |
| | 458 | + t.Errorf("Expected 3 active nodes, got %d", statusResp.NetworkStats.ActiveNodes) |
| | 459 | + } |
| | 460 | + |
| | 461 | + if len(statusResp.ActiveNodes) != 3 { |
| | 462 | + t.Errorf("Expected 3 active nodes in list, got %d", len(statusResp.ActiveNodes)) |
| | 463 | + } |
| | 464 | + |
| | 465 | + if statusResp.Timestamp == 0 { |
| | 466 | + t.Errorf("Expected non-zero timestamp") |
| | 467 | + } |
| | 468 | +} |
| | 469 | + |
| | 470 | +// Benchmark tests |
| | 471 | + |
| | 472 | +func BenchmarkCoordinator_RegisterNode(b *testing.B) { |
| | 473 | + coord := createTestCoordinator(b) |
| | 474 | + defer coord.Shutdown(context.Background()) |
| | 475 | + |
| | 476 | + b.ResetTimer() |
| | 477 | + for i := 0; i < b.N; i++ { |
| | 478 | + req := &models.RegisterNodeRequest{ |
| | 479 | + Addresses: []string{fmt.Sprintf("127.0.0.1:808%d", i)}, |
| | 480 | + StorageCapacity: 1000000000, |
| | 481 | + } |
| | 482 | + _, err := coord.RegisterNode(context.Background(), req) |
| | 483 | + if err != nil { |
| | 484 | + b.Fatalf("RegisterNode failed: %v", err) |
| | 485 | + } |
| | 486 | + } |
| | 487 | +} |
| | 488 | + |
| | 489 | +func BenchmarkCoordinator_NodeHeartbeat(b *testing.B) { |
| | 490 | + coord := createTestCoordinator(b) |
| | 491 | + defer coord.Shutdown(context.Background()) |
| | 492 | + |
| | 493 | + // Register a node first |
| | 494 | + registerReq := &models.RegisterNodeRequest{ |
| | 495 | + Addresses: []string{"127.0.0.1:8080"}, |
| | 496 | + StorageCapacity: 1000000000, |
| | 497 | + } |
| | 498 | + registerResp, _ := coord.RegisterNode(context.Background(), registerReq) |
| | 499 | + nodeID := registerResp.AssignedNodeID |
| | 500 | + |
| | 501 | + heartbeatReq := &models.NodeHeartbeatRequest{ |
| | 502 | + NodeID: nodeID, |
| | 503 | + Stats: &models.NodeStats{ |
| | 504 | + StorageUsed: 500000000, |
| | 505 | + UptimeSeconds: 3600, |
| | 506 | + }, |
| | 507 | + } |
| | 508 | + |
| | 509 | + b.ResetTimer() |
| | 510 | + for i := 0; i < b.N; i++ { |
| | 511 | + _, err := coord.NodeHeartbeat(context.Background(), heartbeatReq) |
| | 512 | + if err != nil { |
| | 513 | + b.Fatalf("NodeHeartbeat failed: %v", err) |
| | 514 | + } |
| | 515 | + } |
| | 516 | +} |