Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
* [BUGFIX] Memberlist: Drop incoming TCP transport packets when digest verification fails, preventing corrupted payloads from being forwarded. #7474
* [BUGFIX] Compactor: Fix stale `cortex_bucket_index_last_successful_update_timestamp_seconds` metric not being cleaned up when tenant ownership changes due to ring rebalancing. This caused false alarms on bucket index update rate when a tenant moved between compactors. #7485
* [BUGFIX] Security: Fix stored XSS vulnerability in Alertmanager and Store Gateway status pages by replacing `text/template` with `html/template`. #7512
* [BUGFIX] Security: Limit decompressed gzip output in `ParseProtoReader` and OTLP ingestion path. The decompressed body is now capped by `-distributor.otlp-max-recv-msg-size`. #7515

## 1.21.0 2026-04-24

Expand Down
8 changes: 4 additions & 4 deletions pkg/util/http.go
Original file line number Diff line number Diff line change
Expand Up @@ -220,11 +220,11 @@ func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compressi
}
body, err = decompressFromBuffer(&buf, maxSize, RawSnappy, sp)
case Gzip:
reader, err = gzip.NewReader(reader)
if err != nil {
return nil, err
gzReader, gzErr := gzip.NewReader(reader)
if gzErr != nil {
return nil, gzErr
}
_, err = buf.ReadFrom(reader)
_, err = buf.ReadFrom(io.LimitReader(gzReader, int64(maxSize)+1))
body = buf.Bytes()
}
return body, err
Expand Down
24 changes: 24 additions & 0 deletions pkg/util/http_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package util_test

import (
"bytes"
"compress/gzip"
"context"
"html/template"
"io"
Expand Down Expand Up @@ -220,3 +221,26 @@ func TestIsRequestBodyTooLargeRegression(t *testing.T) {
_, err := io.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), io.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1))
assert.True(t, util.IsRequestBodyTooLarge(err))
}

func TestParseProtoReader_GzipDecompressionBomb(t *testing.T) {
	// Build a tiny gzip payload that inflates to far more than maxSize.
	const maxSize = 4096 // cap on the decompressed output, in bytes

	plain := make([]byte, 1<<20) // 1 MB of zero bytes: compresses extremely well

	var payload bytes.Buffer
	zw := gzip.NewWriter(&payload)
	_, err := zw.Write(plain)
	require.NoError(t, err)
	require.NoError(t, zw.Close())

	// Small enough on the wire to pass any compressed-size check, yet it
	// would expand to 1 MB if decompression were unbounded.
	require.Less(t, payload.Len(), maxSize)

	var req cortexpb.PreallocWriteRequest
	err = util.ParseProtoReader(context.Background(), io.NopCloser(&payload), 0, maxSize, &req, util.Gzip)
	// Decompression must stop after maxSize+1 bytes, so unmarshalling the
	// truncated output fails with a proto error rather than the process
	// allocating the full 1 MB (the point of this regression test).
	assert.NotNil(t, err)
}
1 change: 1 addition & 0 deletions pkg/util/push/otlp.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,7 @@ func decodeOTLPWriteRequest(ctx context.Context, r *http.Request, maxSize int) (
if err != nil {
return req, err
}
reader = io.LimitReader(reader, int64(maxSize)+1)
}

var buf bytes.Buffer
Expand Down
Loading