|
| 1 | +package core |
| 2 | + |
| 3 | +import ( |
| 4 | + "fmt" |
| 5 | + "io" |
| 6 | + "io/ioutil" |
| 7 | + "math" |
| 8 | + "net/http" |
| 9 | + "time" |
| 10 | + |
| 11 | + "github.com/hashicorp/go-cleanhttp" |
| 12 | + "github.com/hashicorp/go-hclog" |
| 13 | +) |
| 14 | + |
var (
	// Default retry configuration applied by NewClient: waits between
	// attempts start at defaultRetryWaitMin and are capped at
	// defaultRetryWaitMax; at most defaultRetryMax retries are made
	// (so up to defaultRetryMax+1 total attempts).
	defaultRetryWaitMin = 1 * time.Second
	defaultRetryWaitMax = 30 * time.Second
	defaultRetryMax     = 4
)
| 21 | + |
// CheckForRetry specifies a policy for handling retries. It is called
// following each request with the response and error values returned by
// the http.Client. If it returns false, the Client stops retrying
// and returns the response to the caller. If it returns an error,
// that error value is returned in lieu of the error from the request.
type CheckForRetry func(resp *http.Response, err error) (bool, error)
| 28 | + |
| 29 | +// DefaultRetryPolicy provides a default callback for Client.CheckForRetry, |
| 30 | +// will retry on connection errors and server errors . |
| 31 | +func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { |
| 32 | + if err != nil { |
| 33 | + return true, err |
| 34 | + } |
| 35 | + // Check the response code. Here, we retry on 500—range responses to allow |
| 36 | + //the server time to recover |
| 37 | + if resp.StatusCode == 0 || resp.StatusCode >= 500 { |
| 38 | + return true, nil |
| 39 | + } |
| 40 | + return false, nil |
| 41 | +} |
| 42 | + |
// Backoff specifies a policy for how long to wait between retries.
// It is called after a failing request to determine the amount of time
// that should pass before trying again. attemptNum is zero-based; resp
// is the most recent response and may be nil.
type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
| 47 | + |
| 48 | +// DefaultBackoff provides a default callback for Client.Backoff which |
| 49 | +// will perform exponential backoff based on the attempt number and limited |
| 50 | +// by the provided minimum and maximum durations. |
| 51 | +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { |
| 52 | + mult := math.Pow(2, float64(attemptNum)) * float64(min) |
| 53 | + sleep := time.Duration(mult) |
| 54 | + if float64(sleep) != mult || sleep > max { |
| 55 | + sleep = max |
| 56 | + } |
| 57 | + return sleep |
| 58 | +} |
| 59 | + |
// Client is used to make HTTP requests. It adds additional functionality
// like automatic retries to tolerate minor outages.
type Client struct {
	HTTPClient   *http.Client  // Internal HTTP client.
	RetryWaitMin time.Duration // Minimum time to wait between retries.
	RetryWaitMax time.Duration // Maximum time to wait between retries.
	RetryMax     int           // Maximum number of retries.

	// CheckForRetry specifies the policy for handling retries, and is
	// called after each request. The default policy is DefaultRetryPolicy.
	CheckForRetry CheckForRetry

	// Backoff specifies the policy for how long to wait between retries.
	// The default policy is DefaultBackoff.
	Backoff Backoff
}
| 75 | + |
| 76 | +func NewClient() *Client { |
| 77 | + return &Client{ |
| 78 | + HTTPClient: cleanhttp.DefaultClient(), |
| 79 | + RetryWaitMin: defaultRetryWaitMin, |
| 80 | + RetryWaitMax: defaultRetryWaitMax, |
| 81 | + RetryMax: defaultRetryMax, |
| 82 | + CheckForRetry: DefaultRetryPolicy, |
| 83 | + Backoff: DefaultBackoff, |
| 84 | + } |
| 85 | +} |
| 86 | + |
// Request wraps the metadata needed to create HTTP requests.
type Request struct {
	// body is a seekable reader over the request body payload. This is
	// used to rewind the request data in between retries.
	body io.ReadSeeker

	// Embed an HTTP request directly. This makes a *Request act exactly
	// like an *http.Request so that all meta methods are supported.
	*http.Request
}
| 97 | + |
| 98 | +// Try to read the response body so we can reuse this connection. |
| 99 | +func (c *Client) drainBody(body io.ReadCloser) { |
| 100 | + defer body.Close() |
| 101 | + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) |
| 102 | + if err != nil { |
| 103 | + fmt.Printf("[ERR] error reading response body: %v", err) |
| 104 | + } |
| 105 | +} |
| 106 | + |
| 107 | +// Get is a convenience helper for doing simple GET requests. |
| 108 | +func (c *Client) Get(url string) (*http.Response, error) { |
| 109 | + req, err := NewRequest("GET", url, nil) |
| 110 | + if err != nil { |
| 111 | + return nil, err |
| 112 | + } |
| 113 | + return c.Do(req) |
| 114 | +} |
| 115 | + |
| 116 | +// Post is a convenience method for doing simple POST requests. |
| 117 | +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { |
| 118 | + req, err := NewRequest("POST", url, body) |
| 119 | + if err != nil { |
| 120 | + return nil, err |
| 121 | + } |
| 122 | + req.Header.Set("Content-Type", bodyType) |
| 123 | + return c.Do(req) |
| 124 | +} |
| 125 | + |
| 126 | +// NewRequest creates a new wrapped request. |
| 127 | +func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) { |
| 128 | + // Wrap the body in a noop ReadCloser if non-nil. This prevents the |
| 129 | + // reader from being closed by the HTTP client. |
| 130 | + var rcBody io.ReadCloser |
| 131 | + if body != nil { |
| 132 | + rcBody = ioutil.NopCloser(body) |
| 133 | + } |
| 134 | + |
| 135 | + // Make the request with the noop-closer for the body. |
| 136 | + httpReq, err := http.NewRequest(method, url, rcBody) |
| 137 | + if err != nil { |
| 138 | + return nil, err |
| 139 | + } |
| 140 | + |
| 141 | + return &Request{body, httpReq}, nil |
| 142 | +} |
| 143 | + |
| 144 | +// Do wraps calling an HTTP method with retries. |
| 145 | +func (c *Client) Do(req *Request) (*http.Response, error) { |
| 146 | + if c.HTTPClient == nil { |
| 147 | + c.HTTPClient = cleanhttp.DefaultPooledClient() |
| 148 | + } |
| 149 | + |
| 150 | + logger := c.logger() |
| 151 | + |
| 152 | + if logger != nil { |
| 153 | + switch v := logger.(type) { |
| 154 | + case Logger: |
| 155 | + v.Printf("[DEBUG] %s %s", req.Method, req.URL) |
| 156 | + case hclog.Logger: |
| 157 | + v.Debug("performing request", "method", req.Method, "url", req.URL) |
| 158 | + } |
| 159 | + } |
| 160 | + |
| 161 | + var resp *http.Response |
| 162 | + var err error |
| 163 | + |
| 164 | + for i := 0; ; i++ { |
| 165 | + var code int // HTTP response code |
| 166 | + |
| 167 | + // Always rewind the request body when non-nil. |
| 168 | + if req.body != nil { |
| 169 | + body, err := req.body() |
| 170 | + if err != nil { |
| 171 | + c.HTTPClient.CloseIdleConnections() |
| 172 | + return resp, err |
| 173 | + } |
| 174 | + if c, ok := body.(io.ReadCloser); ok { |
| 175 | + req.Body = c |
| 176 | + } else { |
| 177 | + req.Body = ioutil.NopCloser(body) |
| 178 | + } |
| 179 | + } |
| 180 | + |
| 181 | + if c.RequestLogHook != nil && logger != nil { |
| 182 | + switch v := logger.(type) { |
| 183 | + case Logger: |
| 184 | + c.RequestLogHook(v, req.Request, i) |
| 185 | + case hclog.Logger: |
| 186 | + c.RequestLogHook(hookLogger{v}, req.Request, i) |
| 187 | + default: |
| 188 | + c.RequestLogHook(nil, req.Request, i) |
| 189 | + } |
| 190 | + } |
| 191 | + |
| 192 | + // Attempt the request |
| 193 | + resp, err = c.HTTPClient.Do(req.Request) |
| 194 | + if resp != nil { |
| 195 | + code = resp.StatusCode |
| 196 | + } |
| 197 | + |
| 198 | + // Check if we should continue with retries. |
| 199 | + checkOK, checkErr := c.CheckRetry(req.Context(), resp, err) |
| 200 | + |
| 201 | + if logger != nil { |
| 202 | + if err != nil { |
| 203 | + switch v := logger.(type) { |
| 204 | + case Logger: |
| 205 | + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) |
| 206 | + case hclog.Logger: |
| 207 | + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) |
| 208 | + } |
| 209 | + } else { |
| 210 | + // Call this here to maintain the behavior of logging all requests, |
| 211 | + // even if CheckRetry signals to stop. |
| 212 | + if c.ResponseLogHook != nil { |
| 213 | + // Call the response logger function if provided. |
| 214 | + switch v := logger.(type) { |
| 215 | + case Logger: |
| 216 | + c.ResponseLogHook(v, resp) |
| 217 | + case hclog.Logger: |
| 218 | + c.ResponseLogHook(hookLogger{v}, resp) |
| 219 | + default: |
| 220 | + c.ResponseLogHook(nil, resp) |
| 221 | + } |
| 222 | + } |
| 223 | + } |
| 224 | + } |
| 225 | + |
| 226 | + // Now decide if we should continue. |
| 227 | + if !checkOK { |
| 228 | + if checkErr != nil { |
| 229 | + err = checkErr |
| 230 | + } |
| 231 | + c.HTTPClient.CloseIdleConnections() |
| 232 | + return resp, err |
| 233 | + } |
| 234 | + |
| 235 | + // We do this before drainBody beause there's no need for the I/O if |
| 236 | + // we're breaking out |
| 237 | + remain := c.RetryMax - i |
| 238 | + if remain <= 0 { |
| 239 | + break |
| 240 | + } |
| 241 | + |
| 242 | + // We're going to retry, consume any response to reuse the connection. |
| 243 | + if err == nil && resp != nil { |
| 244 | + c.drainBody(resp.Body) |
| 245 | + } |
| 246 | + |
| 247 | + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) |
| 248 | + desc := fmt.Sprintf("%s %s", req.Method, req.URL) |
| 249 | + if code > 0 { |
| 250 | + desc = fmt.Sprintf("%s (status: %d)", desc, code) |
| 251 | + } |
| 252 | + fmt.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) |
| 253 | + time.Sleep(wait) |
| 254 | + } |
| 255 | + |
| 256 | + // Return an error if we fail out of the retry loop |
| 257 | + return nil, fmt.Errorf("%s %s giving up after %d attempts", |
| 258 | + req.Method, req.URL, c.RetryMax+1) |
| 259 | +} |
0 commit comments