Skip to content

Commit fb2cf1d

Browse files
committed
Add rate limit handling and retry logic
This commit adds rate limit handling and retry logic to the HTTP client. It introduces a new function `parseRateLimitHeaders` that parses rate limit headers and determines the wait duration before retrying. It also modifies the `executeRequestWithRetries` function to handle rate limit errors separately and wait before retrying. This improves the resilience of the HTTP client when dealing with rate limits and transient errors.
1 parent 33c8219 commit fb2cf1d

File tree

2 files changed

+59
-16
lines changed

2 files changed

+59
-16
lines changed

httpclient/httpclient_rate_handler.go

Lines changed: 29 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,9 @@ import (
1818
"net/http"
1919
"strconv"
2020
"time"
21+
22+
"github.com/deploymenttheory/go-api-http-client/logger"
23+
"go.uber.org/zap"
2124
)
2225

2326
// Constants for exponential backoff with jitter
@@ -28,37 +31,53 @@ const (
2831
)
2932

3033
// calculateBackoff calculates the next delay for retry with exponential backoff and jitter.
34+
// The baseDelay is the initial delay duration, which is exponentially increased on each retry.
35+
// The jitterFactor adds randomness to the delay to avoid simultaneous retries (thundering herd problem).
36+
// The delay is capped at maxDelay to prevent excessive wait times.
3137
func calculateBackoff(retry int) time.Duration {
38+
if retry < 0 {
39+
retry = 0 // Ensure non-negative retry count
40+
}
41+
3242
delay := float64(baseDelay) * math.Pow(2, float64(retry))
3343
jitter := (rand.Float64() - 0.5) * jitterFactor * 2.0 // Random value between -jitterFactor and +jitterFactor
34-
delay *= (1.0 + jitter)
44+
delayWithJitter := delay * (1.0 + jitter)
3545

36-
if delay > float64(maxDelay) {
46+
if delayWithJitter > float64(maxDelay) {
3747
return maxDelay
3848
}
39-
return time.Duration(delay)
49+
return time.Duration(delayWithJitter)
4050
}
4151

4252
// parseRateLimitHeaders parses common rate limit headers and adjusts behavior accordingly.
43-
// For future compatibility.
44-
func parseRateLimitHeaders(resp *http.Response) time.Duration {
45-
// Check for the Retry-After header
53+
// It handles both Retry-After (in seconds or HTTP-date format) and X-RateLimit-Reset headers.
54+
func parseRateLimitHeaders(resp *http.Response, log logger.Logger) time.Duration {
55+
// Check for the Retry-After header in seconds
4656
if retryAfter := resp.Header.Get("Retry-After"); retryAfter != "" {
4757
if waitSeconds, err := strconv.Atoi(retryAfter); err == nil {
4858
return time.Duration(waitSeconds) * time.Second
59+
} else if retryAfterDate, err := time.Parse(time.RFC1123, retryAfter); err == nil {
60+
// Handle HTTP-date format in Retry-After
61+
return time.Until(retryAfterDate)
62+
} else {
63+
log.Debug("Unable to parse Retry-After header", zap.String("value", retryAfter), zap.Error(err))
4964
}
5065
}
5166

5267
// Check for X-RateLimit-Remaining; if it's 0, use X-RateLimit-Reset to determine how long to wait
5368
if remaining := resp.Header.Get("X-RateLimit-Remaining"); remaining == "0" {
5469
if resetTimeStr := resp.Header.Get("X-RateLimit-Reset"); resetTimeStr != "" {
55-
if resetTimeUnix, err := strconv.ParseInt(resetTimeStr, 10, 64); err == nil {
56-
resetTime := time.Unix(resetTimeUnix, 0)
57-
return time.Until(resetTime) // Using time.Until instead of t.Sub(time.Now())
70+
if resetTimeEpoch, err := strconv.ParseInt(resetTimeStr, 10, 64); err == nil {
71+
resetTime := time.Unix(resetTimeEpoch, 0)
72+
// Add a buffer to account for potential clock skew
73+
const skewBuffer = 5 * time.Second
74+
return time.Until(resetTime) + skewBuffer
75+
} else {
76+
log.Debug("Unable to parse X-RateLimit-Reset header", zap.String("value", resetTimeStr), zap.Error(err))
5877
}
5978
}
6079
}
6180

62-
// No rate limiting headers found, return 0
81+
// No relevant rate limiting headers found, return 0
6382
return 0
6483
}

httpclient/httpclient_request.go

Lines changed: 30 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -170,18 +170,41 @@ func (c *Client) executeRequestWithRetries(method, endpoint string, body, out in
170170
log.Warn("Non-retryable error received", zap.Int("status_code", resp.StatusCode), zap.String("status_message", statusMessage))
171171
return resp, errors.HandleAPIError(resp, log)
172172
}
173+
/*
174+
// Check for retryable errors
175+
if errors.IsRateLimitError(resp) || errors.IsTransientError(resp) {
176+
retryCount++
177+
if retryCount > c.clientConfig.ClientOptions.MaxRetryAttempts {
178+
log.Warn("Max retry attempts reached", zap.String("method", method), zap.String("endpoint", endpoint))
179+
break
180+
}
181+
waitDuration := calculateBackoff(retryCount)
182+
log.Warn("Retrying request due to error", zap.String("method", method), zap.String("endpoint", endpoint), zap.Int("retryCount", retryCount), zap.Duration("waitDuration", waitDuration), zap.Error(err), zap.String("status_message", statusMessage))
183+
time.Sleep(waitDuration)
184+
continue
185+
}
186+
*/
187+
// Parsing rate limit headers if a rate-limit error is detected
188+
if errors.IsRateLimitError(resp) {
189+
waitDuration := parseRateLimitHeaders(resp, log)
190+
if waitDuration > 0 {
191+
log.Warn("Rate limit encountered, waiting before retrying", zap.Duration("waitDuration", waitDuration))
192+
time.Sleep(waitDuration)
193+
continue // Continue to next iteration after waiting
194+
}
195+
}
173196

174-
// Check for retryable errors
175-
if errors.IsRateLimitError(resp) || errors.IsTransientError(resp) {
197+
// Handling retryable errors with exponential backoff
198+
if errors.IsTransientError(resp) {
176199
retryCount++
177200
if retryCount > c.clientConfig.ClientOptions.MaxRetryAttempts {
178201
log.Warn("Max retry attempts reached", zap.String("method", method), zap.String("endpoint", endpoint))
179-
break
202+
break // Stop retrying if max attempts are reached
180203
}
181204
waitDuration := calculateBackoff(retryCount)
182-
log.Warn("Retrying request due to error", zap.String("method", method), zap.String("endpoint", endpoint), zap.Int("retryCount", retryCount), zap.Duration("waitDuration", waitDuration), zap.Error(err), zap.String("status_message", statusMessage))
183-
time.Sleep(waitDuration)
184-
continue
205+
log.Warn("Retrying request due to transient error", zap.String("method", method), zap.String("endpoint", endpoint), zap.Int("retryCount", retryCount), zap.Duration("waitDuration", waitDuration), zap.Error(err))
206+
time.Sleep(waitDuration) // Wait before retrying
207+
continue // Continue to next iteration after waiting
185208
}
186209

187210
// Handle error responses
@@ -193,6 +216,7 @@ func (c *Client) executeRequestWithRetries(method, endpoint string, body, out in
193216
break
194217
}
195218
}
219+
196220
// Handles final non-API error.
197221
if err != nil {
198222
return nil, err

0 commit comments

Comments
 (0)