 package pool

 import (
+	"context"
+	"fmt"
 	"strings"
 	"testing"
 	"time"

+	"github.com/redis/go-redis/v9"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

+	"goa.design/pulse/streaming"
 	ptesting "goa.design/pulse/testing"
 )

@@ -90,6 +94,82 @@ func TestDispatchJobTwoWorkers(t *testing.T) {
 	assert.NoError(t, node.Shutdown(ctx), "Failed to shutdown node")
 }

+func TestNotifyWorker(t *testing.T) {
+	testName := strings.Replace(t.Name(), "/", "_", -1)
+	ctx := ptesting.NewTestContext(t)
+	rdb := ptesting.NewRedisClient(t)
+	node := newTestNode(t, ctx, rdb, testName)
+	defer ptesting.CleanupRedis(t, rdb, true, testName)
+
+	// Create a worker
+	worker := newTestWorker(t, ctx, node)
+
+	// Set up notification handling
+	jobKey := "test-job"
+	jobPayload := []byte("job payload")
+	notificationPayload := []byte("test notification")
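+	// ch is closed by the notification callback below to signal that the
+	// worker received the notification.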
+	ch := make(chan []byte, 1)
+	worker.handler.(*mockHandler).notifyFunc = func(key string, payload []byte) error {
+		assert.Equal(t, jobKey, key, "Received notification for the wrong key")
+		assert.Equal(t, notificationPayload, payload, "Received notification with the wrong payload")
+		close(ch)
+		return nil
+	}
+
+	// Dispatch a job to ensure the worker is assigned
+	require.NoError(t, node.DispatchJob(ctx, jobKey, jobPayload))
+
+	// Send a notification
+	err := node.NotifyWorker(ctx, jobKey, notificationPayload)
+	require.NoError(t, err, "Failed to send notification")
+
+	// Wait for the notification to be received
+	select {
+	case <-ch:
+	case <-time.After(max):
+		t.Fatal("Timeout waiting for notification to be received")
+	}
+
+	// Shutdown node
+	assert.NoError(t, node.Shutdown(ctx), "Failed to shutdown node")
+}
+
+func TestNotifyWorkerNoHandler(t *testing.T) {
+	testName := strings.Replace(t.Name(), "/", "_", -1)
+	ctx, buf := ptesting.NewBufferedLogContext(t)
+	rdb := ptesting.NewRedisClient(t)
+	node := newTestNode(t, ctx, rdb, testName)
+	defer ptesting.CleanupRedis(t, rdb, true, testName)
+
+	// Create a worker without a NotificationHandler implementation
+	worker := newTestWorkerWithoutNotify(t, ctx, node)
+
+	// Dispatch a job to ensure the worker is assigned
+	jobKey := "test-job"
+	jobPayload := []byte("job payload")
+	require.NoError(t, node.DispatchJob(ctx, jobKey, jobPayload))
+
+	// Wait for the job to be received by the worker
+	require.Eventually(t, func() bool {
+		return len(worker.Jobs()) == 1
+	}, max, delay, "Job was not received by the worker")
+
+	// Send a notification
+	notificationPayload := []byte("test notification")
+	assert.NoError(t, node.NotifyWorker(ctx, jobKey, notificationPayload), "Failed to send notification")
+
+	// Check that an error was logged
+	assert.Eventually(t, func() bool {
+		return strings.Contains(buf.String(), "worker does not implement NotificationHandler, ignoring notification")
+	}, max, delay, "Expected error message was not logged within the timeout period")
+
+	// Ensure the worker is still functioning
+	assert.Len(t, worker.Jobs(), 1, "Worker should still have the job")
+
+	// Shutdown node
+	assert.NoError(t, node.Shutdown(ctx), "Failed to shutdown node")
+}
+
 func TestRemoveWorkerThenShutdown(t *testing.T) {
 	ctx := ptesting.NewTestContext(t)
 	testName := strings.Replace(t.Name(), "/", "_", -1)
@@ -225,3 +305,79 @@ func TestNodeCloseAndRequeue(t *testing.T) {
 	// Clean up
 	require.NoError(t, node2.Shutdown(ctx), "Failed to shutdown node2")
 }
+
+func TestStaleEventsAreRemoved(t *testing.T) {
+	// Setup
+	ctx := ptesting.NewTestContext(t)
+	testName := strings.Replace(t.Name(), "/", "_", -1)
+	rdb := ptesting.NewRedisClient(t)
+	defer ptesting.CleanupRedis(t, rdb, true, testName)
+	node := newTestNode(t, ctx, rdb, testName)
+	defer func() { assert.NoError(t, node.Shutdown(ctx)) }()
+
+	// Add a stale event manually
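+	// The ID mimics a Redis stream ID ("<unix-ms>-<seq>"); backdating its
+	// timestamp by more than pendingJobTTL is what makes the event stale.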
+	staleEventID := fmt.Sprintf("%d-0", time.Now().Add(-2*node.pendingJobTTL).UnixNano()/int64(time.Millisecond))
+	staleEvent := &streaming.Event{
+		ID:        staleEventID,
+		EventName: "test-event",
+		Payload:   []byte("test-payload"),
+		Acker: &mockAcker{
+			XAckFunc: func(ctx context.Context, streamKey, sinkName string, ids ...string) *redis.IntCmd {
+				return redis.NewIntCmd(ctx, 0)
+			},
+		},
+	}
+	node.pendingEvents["worker:stale-event-id"] = staleEvent
+
+	// Add a fresh event
+	freshEventID := fmt.Sprintf("%d-0", time.Now().Add(-time.Second).UnixNano()/int64(time.Millisecond))
+	freshEvent := &streaming.Event{
+		ID:        freshEventID,
+		EventName: "test-event",
+		Payload:   []byte("test-payload"),
+		Acker: &mockAcker{
+			XAckFunc: func(ctx context.Context, streamKey, sinkName string, ids ...string) *redis.IntCmd {
+				return redis.NewIntCmd(ctx, 0)
+			},
+		},
+	}
+	node.pendingEvents["worker:fresh-event-id"] = freshEvent
+
+	// Create a mock event to trigger the ackWorkerEvent function
+	mockEvent := &streaming.Event{
+		ID:        "mock-event-id",
+		EventName: evAck,
+		Payload:   marshalEnvelope("worker", marshalAck(&ack{EventID: "mock-event-id"})),
+		Acker: &mockAcker{
+			XAckFunc: func(ctx context.Context, streamKey, sinkName string, ids ...string) *redis.IntCmd {
+				return redis.NewIntCmd(ctx, 0)
+			},
+		},
+	}
+	node.pendingEvents["worker:mock-event-id"] = mockEvent
+
+	// Call ackWorkerEvent to trigger the stale event cleanup
+	node.ackWorkerEvent(ctx, mockEvent)
+
+	assert.Eventually(t, func() bool {
+		node.lock.Lock()
+		defer node.lock.Unlock()
+		_, exists := node.pendingEvents["worker:stale-event-id"]
+		return !exists
+	}, max, delay, "Stale event should have been removed")
+
+	assert.Eventually(t, func() bool {
+		node.lock.Lock()
+		defer node.lock.Unlock()
+		_, exists := node.pendingEvents["worker:fresh-event-id"]
+		return exists
+	}, max, delay, "Fresh event should still be present")
+}
+
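+// mockAcker is a test double for the acker carried by streaming.Event; it
+// lets the test register pending events without acknowledging anything in Redis.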
+type mockAcker struct {
+	XAckFunc func(ctx context.Context, streamKey, sinkName string, ids ...string) *redis.IntCmd
+}
+
+func (m *mockAcker) XAck(ctx context.Context, streamKey, sinkName string, ids ...string) *redis.IntCmd {
+	return m.XAckFunc(ctx, streamKey, sinkName, ids...)
+}
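
The tests in this diff lean on helpers defined in the package's other test files (newTestNode, newTestWorker, newTestWorkerWithoutNotify, mockHandler, and the max/delay timing constants). For orientation only, the sketch below shows one shape the notification-related pieces could take so that worker.handler.(*mockHandler).notifyFunc and the "does not implement NotificationHandler" log path make sense; the interface name comes from the logged message, but its exact signature and the mockHandler fields shown here are assumptions, not part of this change.

// Sketch only: inferred from the tests above, not taken from the actual helpers.
type NotificationHandler interface {
	// HandleNotification is the assumed hook a worker implements to receive
	// out-of-band notifications for a job key.
	HandleNotification(key string, payload []byte) error
}

type mockHandler struct {
	notifyFunc func(key string, payload []byte) error // set by tests to observe notifications
	// ... job-handling fields used by the other tests
}

func (h *mockHandler) HandleNotification(key string, payload []byte) error {
	if h.notifyFunc != nil {
		return h.notifyFunc(key, payload)
	}
	return nil
}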