diff --git a/Gopkg.lock b/Gopkg.lock
index c8a65645eac1fd54dabd893df2edca8dac4790ec..bc716a09b7cf42a2e8d009298327e12a64df59a2 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -187,7 +187,7 @@
 
 [[projects]]
   branch = "lazy-load-chunks"
-  digest = "1:ec8e0308d1e557f50317a6437073a7a859d73e4cf8e4c20a60d7009e352353c6"
+  digest = "1:bf1fa66c54722bc8664f1465e427cd6fe7df52f2b6fd5ab996baf37601687b70"
   name = "github.com/cortexproject/cortex"
   packages = [
     "pkg/chunk",
@@ -211,10 +211,9 @@
     "pkg/util/middleware",
     "pkg/util/spanlogger",
     "pkg/util/validation",
-    "pkg/util/wire",
   ]
   pruneopts = "UT"
-  revision = "161f6716cba9a32f07f359c4f9f8578e0c5d5ae8"
+  revision = "95a3f308e95617732b76e337874e83ccf173cf14"
   source = "https://github.com/grafana/cortex"
 
 [[projects]]
@@ -1367,7 +1366,6 @@
     "github.com/cortexproject/cortex/pkg/util",
     "github.com/cortexproject/cortex/pkg/util/flagext",
     "github.com/cortexproject/cortex/pkg/util/validation",
-    "github.com/cortexproject/cortex/pkg/util/wire",
     "github.com/fatih/color",
     "github.com/go-kit/kit/log",
     "github.com/go-kit/kit/log/level",
@@ -1391,10 +1389,12 @@
     "github.com/prometheus/prometheus/discovery/targetgroup",
     "github.com/prometheus/prometheus/pkg/labels",
     "github.com/prometheus/prometheus/pkg/relabel",
+    "github.com/prometheus/prometheus/pkg/textparse",
     "github.com/prometheus/prometheus/relabel",
     "github.com/stretchr/testify/assert",
     "github.com/stretchr/testify/require",
     "github.com/weaveworks/common/httpgrpc",
+    "github.com/weaveworks/common/httpgrpc/server",
     "github.com/weaveworks/common/middleware",
     "github.com/weaveworks/common/server",
     "github.com/weaveworks/common/tracing",
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index 1cff642f9294c11dc937b16d1703c7b5b7e29d11..55a777f7d1916cfbee23432948f9c334f98967cd 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -184,7 +184,7 @@ func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediat
 	return nil
 }
 
-func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, []client.LabelPair) {
+func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, []client.LabelAdapter) {
 	instance.streamsMtx.Lock()
 	defer instance.streamsMtx.Unlock()
 
@@ -234,18 +234,18 @@ func (i *Ingester) removeFlushedChunks(instance *instance, stream *stream) {
 
 	if len(stream.chunks) == 0 {
 		delete(instance.streams, stream.fp)
-		instance.index.Delete(client.FromLabelPairsToLabels(stream.labels), stream.fp)
+		instance.index.Delete(client.FromLabelAdaptersToLabels(stream.labels), stream.fp)
 		instance.streamsRemovedTotal.Inc()
 	}
 }
 
-func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs []client.LabelPair, cs []*chunkDesc) error {
+func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs []client.LabelAdapter, cs []*chunkDesc) error {
 	userID, err := user.ExtractOrgID(ctx)
 	if err != nil {
 		return err
 	}
 
-	metric := fromLabelPairs(labelPairs)
+	metric := client.FromLabelAdaptersToMetric(labelPairs)
 	metric[nameLabel] = logsValue
 
 	wireChunks := make([]chunk.Chunk, 0, len(cs))
@@ -288,11 +288,3 @@ func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelP
 
 	return nil
 }
-
-func fromLabelPairs(ls []client.LabelPair) model.Metric {
-	m := make(model.Metric, len(ls))
-	for _, l := range ls {
-		m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-	}
-	return m
-}
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index f2a6529ac202c9851f4fe7d76359c14fb7031c2d..93d8453c581d82ed6ec2b802dcc6bef586e06c9f 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -47,7 +47,7 @@ type stream struct {
 	// Not thread-safe; assume accesses to this are locked by caller.
 	chunks []chunkDesc
 	fp     model.Fingerprint
-	labels []client.LabelPair
+	labels []client.LabelAdapter
 }
 
 type chunkDesc struct {
@@ -58,7 +58,7 @@ type chunkDesc struct {
 	lastUpdated time.Time
 }
 
-func newStream(fp model.Fingerprint, labels []client.LabelPair) *stream {
+func newStream(fp model.Fingerprint, labels []client.LabelAdapter) *stream {
 	return &stream{
 		fp:     fp,
 		labels: labels,
@@ -96,7 +96,7 @@ func (s *stream) Push(_ context.Context, entries []logproto.Entry) error {
 	}
 
 	if appendErr == chunkenc.ErrOutOfOrder {
-		return httpgrpc.Errorf(http.StatusBadRequest, "entry out of order for stream: %s", client.FromLabelPairsToLabels(s.labels).String())
+		return httpgrpc.Errorf(http.StatusBadRequest, "entry out of order for stream: %s", client.FromLabelAdaptersToLabels(s.labels).String())
 	}
 
 	return appendErr
@@ -121,5 +121,5 @@ func (s *stream) Iterator(from, through time.Time, direction logproto.Direction)
 		}
 	}
 
-	return iter.NewNonOverlappingIterator(iterators, client.FromLabelPairsToLabels(s.labels).String()), nil
+	return iter.NewNonOverlappingIterator(iterators, client.FromLabelAdaptersToLabels(s.labels).String()), nil
 }
diff --git a/pkg/logproto/dep.go b/pkg/logproto/dep.go
index 68eb69c94481e9b0624d37e48b64644510afb20c..34d7bc3d65d17d7f57cf0b69dd7922756bab9710 100644
--- a/pkg/logproto/dep.go
+++ b/pkg/logproto/dep.go
@@ -2,6 +2,6 @@ package logproto
 
 import (
 	// trick dep into including this, needed by the generated code.
-	_ "github.com/cortexproject/cortex/pkg/util/wire"
+	_ "github.com/cortexproject/cortex/pkg/chunk/storage"
 	_ "github.com/gogo/protobuf/types"
 )
diff --git a/pkg/util/conv.go b/pkg/util/conv.go
index b44bed5ce77ac7eb86cc6733f18871252e77745a..77dce97f315515b3f144d12c357da8755c99f4dc 100644
--- a/pkg/util/conv.go
+++ b/pkg/util/conv.go
@@ -2,22 +2,21 @@ package util
 
 import (
 	"github.com/cortexproject/cortex/pkg/ingester/client"
-	"github.com/cortexproject/cortex/pkg/util/wire"
 	"github.com/grafana/loki/pkg/parser"
 )
 
 // ToClientLabels parses the labels and converts them to the Cortex type.
-func ToClientLabels(labels string) ([]client.LabelPair, error) {
+func ToClientLabels(labels string) ([]client.LabelAdapter, error) {
 	ls, err := parser.Labels(labels)
 	if err != nil {
 		return nil, err
 	}
 
-	pairs := make([]client.LabelPair, 0, len(ls))
+	pairs := make([]client.LabelAdapter, 0, len(ls))
 	for i := 0; i < len(ls); i++ {
-		pairs = append(pairs, client.LabelPair{
-			Name:  wire.Bytes(ls[i].Name),
-			Value: wire.Bytes(ls[i].Value),
+		pairs = append(pairs, client.LabelAdapter{
+			Name:  ls[i].Name,
+			Value: ls[i].Value,
 		})
 	}
 	return pairs, nil
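For context, a rough sketch of how a caller consumes the new return type; the selector string and the main wrapper are illustrative, and the import paths follow the packages touched in this diff:

package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/grafana/loki/pkg/util"
)

func main() {
	// ToClientLabels now yields []client.LabelAdapter with plain string fields,
	// so no wire.Bytes conversion is involved anymore.
	adapters, err := util.ToClientLabels(`{app="loki", env="prod"}`)
	if err != nil {
		panic(err)
	}
	// The compat helpers vendored in this change can view the slice as
	// labels.Labels without copying.
	fmt.Println(client.FromLabelAdaptersToLabels(adapters).String())
}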
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
index 67111722cd4d9ddd2d1dd5977af409486573c461..b3a82eb960ca6c77903b3f113a0706548ce960fa 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
@@ -20,6 +20,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/chunk/cache"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/extract"
+	"github.com/cortexproject/cortex/pkg/util/flagext"
 	"github.com/cortexproject/cortex/pkg/util/spanlogger"
 	"github.com/cortexproject/cortex/pkg/util/validation"
 	"github.com/weaveworks/common/httpgrpc"
@@ -58,25 +59,21 @@ type StoreConfig struct {
 	ChunkCacheConfig       cache.Config
 	WriteDedupeCacheConfig cache.Config
 
-	MinChunkAge              time.Duration
-	CardinalityCacheSize     int
-	CardinalityCacheValidity time.Duration
-	CardinalityLimit         int
-
+	MinChunkAge           time.Duration
 	CacheLookupsOlderThan time.Duration
 }
 
 // RegisterFlags adds the flags required to config this to the given FlagSet
 func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
 	cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("", "Cache config for chunks. ", f)
-
 	cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f)
 
 	f.DurationVar(&cfg.MinChunkAge, "store.min-chunk-age", 0, "Minimum time between chunk update and being saved to the store.")
-	f.IntVar(&cfg.CardinalityCacheSize, "store.cardinality-cache-size", 0, "Size of in-memory cardinality cache, 0 to disable.")
-	f.DurationVar(&cfg.CardinalityCacheValidity, "store.cardinality-cache-validity", 1*time.Hour, "Period for which entries in the cardinality cache are valid.")
-	f.IntVar(&cfg.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.")
 	f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 0 to disable.")
+
+	// Deprecated.
+	flagext.DeprecatedFlag(f, "store.cardinality-cache-size", "DEPRECATED. Use store.index-cache-size.enable-fifocache and store.cardinality-cache.fifocache.size instead.")
+	flagext.DeprecatedFlag(f, "store.cardinality-cache-validity", "DEPRECATED. Use store.index-cache-size.enable-fifocache and store.cardinality-cache.fifocache.duration instead.")
 }
 
 // store implements Store
@@ -211,7 +208,7 @@ func (c *store) validateQuery(ctx context.Context, from model.Time, through *mod
 
 	maxQueryLength := c.limits.MaxQueryLength(userID)
 	if maxQueryLength > 0 && (*through).Sub(from) > maxQueryLength {
-		return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, length > limit (%s > %s)", (*through).Sub(from), maxQueryLength)
+		return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(from), maxQueryLength)
 	}
 
 	now := model.Now()
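The cardinality flags are now kept only as deprecation stubs. A minimal sketch of the general shape of such a stub, assuming nothing beyond the standard library (it illustrates the idea, not the vendored flagext implementation):

package main

import (
	"flag"
	"log"
)

// deprecatedFlag is a flag.Value that accepts any input but only warns,
// so old configs keep parsing while the value is discarded.
type deprecatedFlag struct{ name string }

func (deprecatedFlag) String() string { return "" }

func (d deprecatedFlag) Set(string) error {
	log.Printf("flag %q is deprecated and has no effect", d.name)
	return nil
}

func registerDeprecated(f *flag.FlagSet, name string) {
	f.Var(deprecatedFlag{name: name}, name, "Deprecated: this flag is ignored.")
}

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	registerDeprecated(fs, "store.cardinality-cache-size")
	_ = fs.Parse([]string{"-store.cardinality-cache-size=1000"}) // warns, value dropped
}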
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go
index 29746f87e56708499bea4f4b009bd87311694c33..0282c08e99763a3f7e627bf3dde9cba4690a5baf 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go
@@ -161,8 +161,9 @@ func (b *bigchunk) Len() int {
 }
 
 func (b *bigchunk) Size() int {
-	sum := 0
+	sum := 2 // For the number of sub chunks.
 	for _, c := range b.chunks {
+		sum += 2 // For the length of the sub chunk.
 		sum += len(c.Bytes())
 	}
 	return sum
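For concreteness, with this accounting a bigchunk holding three sub-chunks of 1024 bytes each (sizes illustrative) reports Size() = 2 + 3*(2+1024) = 3080: two bytes for the sub-chunk count, plus a two-byte length prefix and the payload for every sub-chunk.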
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go
index a43980bde1ac02294bf92b1b915ce60aec05ea88..adf155f6c5e5f03c9ff603765d14a5a1f92330a0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go
@@ -114,7 +114,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun
 				decodeContext := chunk.NewDecodeContext()
 
 				var processingErr error
-				var recievedChunks = 0
+				var receivedChunks = 0
 
 				// rows are returned in key order, not order in row list
 				err := table.ReadRows(ctx, page, func(row bigtable.Row) bool {
@@ -130,7 +130,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun
 						return false
 					}
 
-					recievedChunks++
+					receivedChunks++
 					outs <- chunk
 					return true
 				})
@@ -139,8 +139,8 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun
 					errs <- processingErr
 				} else if err != nil {
 					errs <- errors.WithStack(err)
-				} else if recievedChunks < len(page) {
-					errs <- errors.WithStack(fmt.Errorf("Asked for %d chunks for Bigtable, received %d", len(page), recievedChunks))
+				} else if receivedChunks < len(page) {
+					errs <- errors.WithStack(fmt.Errorf("Asked for %d chunks for Bigtable, received %d", len(page), receivedChunks))
 				}
 			}(page)
 		}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
index 93f36ab0f7a4640839525be6774fe10e9efa150a..4a0b742e6af287c4386d8447aee98f220a554994 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
@@ -23,7 +23,9 @@ import (
 )
 
 var (
-	errCardinalityExceeded = errors.New("cardinality limit exceeded")
+	// ErrCardinalityExceeded is returned when the user reads a row that
+	// is too large.
+	ErrCardinalityExceeded = errors.New("cardinality limit exceeded")
 
 	indexLookupsPerQuery = promauto.NewHistogram(prometheus.HistogramOpts{
 		Namespace: "cortex",
@@ -57,8 +59,6 @@ var (
 // seriesStore implements Store
 type seriesStore struct {
 	store
-	cardinalityCache *cache.FifoCache
-
 	writeDedupeCache cache.Cache
 }
 
@@ -89,10 +89,6 @@ func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Ob
 			limits:  limits,
 			Fetcher: fetcher,
 		},
-		cardinalityCache: cache.NewFifoCache("cardinality", cache.FifoCacheConfig{
-			Size:     cfg.CardinalityCacheSize,
-			Validity: cfg.CardinalityCacheValidity,
-		}),
 		writeDedupeCache: writeDedupeCache,
 	}, nil
 }
@@ -229,15 +225,21 @@ func (c *seriesStore) lookupSeriesByMetricNameMatchers(ctx context.Context, from
 				ids = intersectStrings(ids, incoming)
 			}
 		case err := <-incomingErrors:
-			if err == errCardinalityExceeded {
+			// The idea is that if we have 2 matchers, and if one returns a lot of
+			// series and the other returns only 10 (a few), we don't look up the first one at all.
+			// We just manually filter through the 10 series again using "filterChunksByMatchers",
+			// saving us from looking up and intersecting a lot of series.
+			if err == ErrCardinalityExceeded {
 				cardinalityExceededErrors++
 			} else {
 				lastErr = err
 			}
 		}
 	}
+
+	// But if every single matcher returns a lot of series, then it makes sense to abort the query.
 	if cardinalityExceededErrors == len(matchers) {
-		return nil, errCardinalityExceeded
+		return nil, ErrCardinalityExceeded
 	} else if lastErr != nil {
 		return nil, lastErr
 	}
@@ -270,36 +272,12 @@ func (c *seriesStore) lookupSeriesByMetricNameMatcher(ctx context.Context, from,
 	}
 	level.Debug(log).Log("queries", len(queries))
 
-	for _, query := range queries {
-		value, ok := c.cardinalityCache.Get(ctx, query.HashValue)
-		if !ok {
-			continue
-		}
-		cardinality := value.(int)
-		if cardinality > c.cfg.CardinalityLimit {
-			return nil, errCardinalityExceeded
-		}
-	}
-
 	entries, err := c.lookupEntriesByQueries(ctx, queries)
 	if err != nil {
 		return nil, err
 	}
 	level.Debug(log).Log("entries", len(entries))
 
-	// TODO This is not correct, will overcount for queries > 24hrs
-	keys := make([]string, 0, len(queries))
-	values := make([]interface{}, 0, len(queries))
-	for _, query := range queries {
-		keys = append(keys, query.HashValue)
-		values = append(values, len(entries))
-	}
-	c.cardinalityCache.Put(ctx, keys, values)
-
-	if len(entries) > c.cfg.CardinalityLimit {
-		return nil, errCardinalityExceeded
-	}
-
 	ids, err := c.parseIndexEntries(ctx, entries, matcher)
 	if err != nil {
 		return nil, err
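The comments above rely on intersecting the per-matcher series IDs so that one cheap matcher can shrink the candidate set. A stand-alone sketch of that kind of sorted-slice intersection, as an illustration rather than the vendored intersectStrings helper:

package main

import "fmt"

// intersectSorted keeps only the IDs present in both sorted, de-duplicated
// slices; each additional matcher can only narrow the result.
func intersectSorted(a, b []string) []string {
	out := make([]string, 0)
	for i, j := 0, 0; i < len(a) && j < len(b); {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	fmt.Println(intersectSorted([]string{"s1", "s2", "s5"}, []string{"s2", "s4", "s5"})) // [s2 s5]
}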
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go
similarity index 97%
rename from vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go
rename to vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go
index dfabadd8e061fa643f46f90f42d4d36c3ec9dd8b..c4804995ff82ae198235a25c37713050fc0974f1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go
@@ -1,4 +1,4 @@
-package wire
+package storage
 
 import (
 	"bytes"
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
index cc36d5cb27d8fd2b8dcd7980da0214effc04a34d..1469e92d75ed5194b44896251fcc6bdb1e596afb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
@@ -3,6 +3,9 @@ package storage
 import (
 	"time"
 
+	"github.com/cortexproject/cortex/pkg/util/flagext"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+
 	"github.com/cortexproject/cortex/pkg/chunk/cache"
 	"github.com/cortexproject/cortex/pkg/chunk/gcp"
 
@@ -16,11 +19,15 @@ type fixture struct {
 
 func (f fixture) Name() string { return "caching-store" }
 func (f fixture) Clients() (chunk.IndexClient, chunk.ObjectClient, chunk.TableClient, chunk.SchemaConfig, error) {
+	limits, err := defaultLimits()
+	if err != nil {
+		return nil, nil, nil, chunk.SchemaConfig{}, err
+	}
 	indexClient, objectClient, tableClient, schemaConfig, err := f.fixture.Clients()
 	indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{
 		Size:     500,
 		Validity: 5 * time.Minute,
-	}), 5*time.Minute)
+	}), 5*time.Minute, limits)
 	return indexClient, objectClient, tableClient, schemaConfig, err
 }
 func (f fixture) Teardown() error { return f.fixture.Teardown() }
@@ -29,3 +36,9 @@ func (f fixture) Teardown() error { return f.fixture.Teardown() }
 var Fixtures = []testutils.Fixture{
 	fixture{gcp.Fixtures[0]},
 }
+
+func defaultLimits() (*validation.Overrides, error) {
+	var defaults validation.Limits
+	flagext.DefaultValues(&defaults)
+	return validation.NewOverrides(defaults)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
index fcb9a02204a1d8c503d389e1c06dd27f67f553c2..c4df850b885b0a058962060e2b8a83caf15a4e0f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
@@ -5,15 +5,18 @@ import (
 	"sync"
 	"time"
 
+	"github.com/go-kit/kit/log/level"
+	proto "github.com/golang/protobuf/proto"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/weaveworks/common/user"
+
 	"github.com/cortexproject/cortex/pkg/chunk"
 	"github.com/cortexproject/cortex/pkg/chunk/cache"
 	chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/spanlogger"
-	"github.com/go-kit/kit/log/level"
-	proto "github.com/golang/protobuf/proto"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/cortexproject/cortex/pkg/util/validation"
 )
 
 var (
@@ -43,9 +46,10 @@ type cachingIndexClient struct {
 	chunk.IndexClient
 	cache    cache.Cache
 	validity time.Duration
+	limits   *validation.Overrides
 }
 
-func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration) chunk.IndexClient {
+func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits *validation.Overrides) chunk.IndexClient {
 	if c == nil {
 		return client
 	}
@@ -54,6 +58,7 @@ func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity tim
 		IndexClient: client,
 		cache:       cache.NewSnappy(c),
 		validity:    validity,
+		limits:      limits,
 	}
 }
 
@@ -65,6 +70,12 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
 	// We cache the entire row, so filter client side.
 	callback = chunk_util.QueryFilter(callback)
 
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return err
+	}
+	cardinalityLimit := int32(s.limits.CardinalityLimit(userID))
+
 	// Build list of keys to lookup in the cache.
 	keys := make([]string, 0, len(queries))
 	queriesByKey := make(map[string][]chunk.IndexQuery, len(queries))
@@ -76,6 +87,10 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
 
 	batches, misses := s.cacheFetch(ctx, keys)
 	for _, batch := range batches {
+		if cardinalityLimit > 0 && batch.Cardinality > cardinalityLimit {
+			return chunk.ErrCardinalityExceeded
+		}
+
 		queries := queriesByKey[batch.Key]
 		for _, query := range queries {
 			callback(query, batch)
@@ -115,7 +130,7 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
 		results[key] = rb
 	}
 
-	err := s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool {
+	err = s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool {
 		resultsMtx.Lock()
 		defer resultsMtx.Unlock()
 		key := queryKey(cacheableQuery)
@@ -135,9 +150,20 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
 		defer resultsMtx.Unlock()
 		keys := make([]string, 0, len(results))
 		batches := make([]ReadBatch, 0, len(results))
+		var cardinalityErr error
 		for key, batch := range results {
+			cardinality := int32(len(batch.Entries))
+			if cardinalityLimit > 0 && cardinality > cardinalityLimit {
+				batch.Cardinality = cardinality
+				batch.Entries = nil
+				cardinalityErr = chunk.ErrCardinalityExceeded
+			}
+
 			keys = append(keys, key)
 			batches = append(batches, batch)
+			if cardinalityErr != nil {
+				continue
+			}
 
 			queries := queriesByKey[key]
 			for _, query := range queries {
@@ -145,8 +171,8 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
 			}
 		}
 		s.cacheStore(ctx, keys, batches)
+		return cardinalityErr
 	}
-	return nil
 }
 
 // Iterator implements chunk.ReadBatch.
@@ -250,7 +276,6 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat
 		}
 
 		if readBatch.Expiry != 0 && time.Now().After(time.Unix(0, readBatch.Expiry)) {
-			level.Debug(log).Log("msg", "dropping index cache entry due to expiration", "key", key, "readBatch.Key", readBatch.Key, "expiry", time.Unix(0, readBatch.Expiry))
 			continue
 		}
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go
index 7aa411fe2cd18e3b02148917a29c7d889c08c02b..761e22f4b80a21169f020e90c3d5fd6fb6916ef2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go
@@ -3,17 +3,15 @@
 
 package storage
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import github_com_cortexproject_cortex_pkg_util_wire "github.com/cortexproject/cortex/pkg/util/wire"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -27,14 +25,14 @@ var _ = math.Inf
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 type Entry struct {
-	Column github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"Column"`
-	Value  github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,2,opt,name=Value,json=value,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"Value"`
+	Column Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=Bytes" json:"Column"`
+	Value  Bytes `protobuf:"bytes,2,opt,name=Value,json=value,proto3,customtype=Bytes" json:"Value"`
 }
 
 func (m *Entry) Reset()      { *m = Entry{} }
 func (*Entry) ProtoMessage() {}
 func (*Entry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_caching_index_client_2f4bf220288f700f, []int{0}
+	return fileDescriptor_a60039d4a2d816f6, []int{0}
 }
 func (m *Entry) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -51,8 +49,8 @@ func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *Entry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Entry.Merge(dst, src)
+func (m *Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Entry.Merge(m, src)
 }
 func (m *Entry) XXX_Size() int {
 	return m.Size()
@@ -64,16 +62,19 @@ func (m *Entry) XXX_DiscardUnknown() {
 var xxx_messageInfo_Entry proto.InternalMessageInfo
 
 type ReadBatch struct {
-	Entries []Entry `protobuf:"bytes,1,rep,name=entries" json:"entries"`
+	Entries []Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"`
 	Key     string  `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
 	// The time at which the key expires.
 	Expiry int64 `protobuf:"varint,3,opt,name=expiry,proto3" json:"expiry,omitempty"`
+	// The number of entries; used for cardinality limiting.
+	// entries will be empty when this is set.
+	Cardinality int32 `protobuf:"varint,4,opt,name=cardinality,proto3" json:"cardinality,omitempty"`
 }
 
 func (m *ReadBatch) Reset()      { *m = ReadBatch{} }
 func (*ReadBatch) ProtoMessage() {}
 func (*ReadBatch) Descriptor() ([]byte, []int) {
-	return fileDescriptor_caching_index_client_2f4bf220288f700f, []int{1}
+	return fileDescriptor_a60039d4a2d816f6, []int{1}
 }
 func (m *ReadBatch) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -90,8 +91,8 @@ func (m *ReadBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *ReadBatch) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadBatch.Merge(dst, src)
+func (m *ReadBatch) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadBatch.Merge(m, src)
 }
 func (m *ReadBatch) XXX_Size() int {
 	return m.Size()
@@ -123,10 +124,47 @@ func (m *ReadBatch) GetExpiry() int64 {
 	return 0
 }
 
+func (m *ReadBatch) GetCardinality() int32 {
+	if m != nil {
+		return m.Cardinality
+	}
+	return 0
+}
+
 func init() {
 	proto.RegisterType((*Entry)(nil), "storage.Entry")
 	proto.RegisterType((*ReadBatch)(nil), "storage.ReadBatch")
 }
+
+func init() {
+	proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_a60039d4a2d816f6)
+}
+
+var fileDescriptor_a60039d4a2d816f6 = []byte{
+	// 335 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xc3, 0x30,
+	0x00, 0x44, 0x63, 0xd2, 0xa4, 0xaa, 0x0b, 0x08, 0x65, 0x40, 0x11, 0x83, 0x1b, 0x15, 0x21, 0x65,
+	0x21, 0x91, 0x80, 0x2f, 0x08, 0x62, 0x63, 0x0a, 0x12, 0x6b, 0xe5, 0xba, 0x26, 0x31, 0x4d, 0xed,
+	0xc8, 0x75, 0x50, 0xb3, 0xb1, 0xb1, 0xf2, 0x19, 0x7c, 0x4a, 0xc7, 0x8e, 0x15, 0x43, 0x45, 0xdd,
+	0x85, 0xb1, 0x9f, 0x80, 0x6a, 0x82, 0xd4, 0x81, 0xed, 0x9e, 0xef, 0x7c, 0x67, 0x19, 0xde, 0x67,
+	0x4c, 0xe5, 0xd5, 0x30, 0x22, 0x62, 0x12, 0x13, 0x21, 0x15, 0x9d, 0x95, 0x52, 0x3c, 0x53, 0xa2,
+	0x1a, 0x8a, 0xcb, 0x71, 0x16, 0x93, 0xbc, 0xe2, 0xe3, 0x78, 0xaa, 0x84, 0xc4, 0x19, 0x8d, 0x09,
+	0x26, 0x39, 0xe3, 0xd9, 0x80, 0xf1, 0x11, 0x9d, 0x0d, 0x48, 0xc1, 0x28, 0x57, 0x51, 0x29, 0x85,
+	0x12, 0x5e, 0xbb, 0xc9, 0x9c, 0x5d, 0xee, 0xd5, 0x66, 0x22, 0x13, 0xb1, 0xf1, 0x87, 0xd5, 0x93,
+	0x21, 0x03, 0x46, 0xfd, 0xde, 0xeb, 0x3f, 0x40, 0xe7, 0x8e, 0x2b, 0x59, 0x7b, 0x17, 0xd0, 0xbd,
+	0x15, 0x45, 0x35, 0xe1, 0x3e, 0x08, 0x40, 0x78, 0x98, 0x1c, 0xcd, 0x57, 0x3d, 0xeb, 0x73, 0xd5,
+	0x73, 0x92, 0x5a, 0xd1, 0x69, 0xea, 0x12, 0x63, 0x7a, 0xe7, 0xd0, 0x79, 0xc4, 0x45, 0x45, 0xfd,
+	0x83, 0xff, 0x52, 0xce, 0xcb, 0xce, 0xeb, 0xbf, 0x01, 0xd8, 0x49, 0x29, 0x1e, 0x25, 0x58, 0x91,
+	0xdc, 0x8b, 0x60, 0x9b, 0x72, 0x25, 0x19, 0x9d, 0xfa, 0x20, 0xb0, 0xc3, 0xee, 0xd5, 0x71, 0xd4,
+	0x3c, 0x36, 0x32, 0xd3, 0x49, 0x6b, 0x57, 0x92, 0xfe, 0x85, 0xbc, 0x13, 0x68, 0x8f, 0x69, 0x6d,
+	0x06, 0x3a, 0xe9, 0x4e, 0x7a, 0xa7, 0xd0, 0xa5, 0xb3, 0x92, 0xc9, 0xda, 0xb7, 0x03, 0x10, 0xda,
+	0x69, 0x43, 0x5e, 0x00, 0xbb, 0x04, 0xcb, 0x11, 0xe3, 0xb8, 0x60, 0xaa, 0xf6, 0x5b, 0x01, 0x08,
+	0x9d, 0x74, 0xff, 0x28, 0xb9, 0x59, 0xac, 0x91, 0xb5, 0x5c, 0x23, 0x6b, 0xbb, 0x46, 0xe0, 0x55,
+	0x23, 0xf0, 0xa1, 0x11, 0x98, 0x6b, 0x04, 0x16, 0x1a, 0x81, 0x2f, 0x8d, 0xc0, 0xb7, 0x46, 0xd6,
+	0x56, 0x23, 0xf0, 0xbe, 0x41, 0xd6, 0x62, 0x83, 0xac, 0xe5, 0x06, 0x59, 0x43, 0xd7, 0xfc, 0xcd,
+	0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xd2, 0x5d, 0xd9, 0xa3, 0x01, 0x00, 0x00,
+}
+
 func (this *Entry) Equal(that interface{}) bool {
 	if that == nil {
 		return this == nil
@@ -187,6 +225,9 @@ func (this *ReadBatch) Equal(that interface{}) bool {
 	if this.Expiry != that1.Expiry {
 		return false
 	}
+	if this.Cardinality != that1.Cardinality {
+		return false
+	}
 	return true
 }
 func (this *Entry) GoString() string {
@@ -204,7 +245,7 @@ func (this *ReadBatch) GoString() string {
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 7)
+	s := make([]string, 0, 8)
 	s = append(s, "&storage.ReadBatch{")
 	if this.Entries != nil {
 		vs := make([]*Entry, len(this.Entries))
@@ -215,6 +256,7 @@ func (this *ReadBatch) GoString() string {
 	}
 	s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
 	s = append(s, "Expiry: "+fmt.Sprintf("%#v", this.Expiry)+",\n")
+	s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -244,17 +286,17 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
 	dAtA[i] = 0xa
 	i++
 	i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Column.Size()))
-	n1, err := m.Column.MarshalTo(dAtA[i:])
-	if err != nil {
-		return 0, err
+	n1, err1 := m.Column.MarshalTo(dAtA[i:])
+	if err1 != nil {
+		return 0, err1
 	}
 	i += n1
 	dAtA[i] = 0x12
 	i++
 	i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Value.Size()))
-	n2, err := m.Value.MarshalTo(dAtA[i:])
-	if err != nil {
-		return 0, err
+	n2, err2 := m.Value.MarshalTo(dAtA[i:])
+	if err2 != nil {
+		return 0, err2
 	}
 	i += n2
 	return i, nil
@@ -298,6 +340,11 @@ func (m *ReadBatch) MarshalTo(dAtA []byte) (int, error) {
 		i++
 		i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Expiry))
 	}
+	if m.Cardinality != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Cardinality))
+	}
 	return i, nil
 }
 
@@ -342,6 +389,9 @@ func (m *ReadBatch) Size() (n int) {
 	if m.Expiry != 0 {
 		n += 1 + sovCachingIndexClient(uint64(m.Expiry))
 	}
+	if m.Cardinality != 0 {
+		n += 1 + sovCachingIndexClient(uint64(m.Cardinality))
+	}
 	return n
 }
 
@@ -373,10 +423,16 @@ func (this *ReadBatch) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForEntries := "[]Entry{"
+	for _, f := range this.Entries {
+		repeatedStringForEntries += strings.Replace(strings.Replace(f.String(), "Entry", "Entry", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEntries += "}"
 	s := strings.Join([]string{`&ReadBatch{`,
-		`Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "Entry", "Entry", 1), `&`, ``, 1) + `,`,
+		`Entries:` + repeatedStringForEntries + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Expiry:` + fmt.Sprintf("%v", this.Expiry) + `,`,
+		`Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -404,7 +460,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -432,7 +488,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -441,6 +497,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCachingIndexClient
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCachingIndexClient
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -462,7 +521,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -471,6 +530,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCachingIndexClient
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCachingIndexClient
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -487,6 +549,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCachingIndexClient
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCachingIndexClient
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -514,7 +579,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -542,7 +607,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -551,6 +616,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCachingIndexClient
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCachingIndexClient
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -573,7 +641,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -583,6 +651,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCachingIndexClient
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCachingIndexClient
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -602,7 +673,26 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Expiry |= (int64(b) & 0x7F) << shift
+				m.Expiry |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType)
+			}
+			m.Cardinality = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCachingIndexClient
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Cardinality |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -616,6 +706,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCachingIndexClient
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCachingIndexClient
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -682,10 +775,13 @@ func skipCachingIndexClient(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthCachingIndexClient
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthCachingIndexClient
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -714,6 +810,9 @@ func skipCachingIndexClient(dAtA []byte) (n int, err error) {
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthCachingIndexClient
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -732,32 +831,3 @@ var (
 	ErrInvalidLengthCachingIndexClient = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowCachingIndexClient   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_caching_index_client_2f4bf220288f700f)
-}
-
-var fileDescriptor_caching_index_client_2f4bf220288f700f = []byte{
-	// 331 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x90, 0xb1, 0x4e, 0xeb, 0x30,
-	0x14, 0x86, 0xe3, 0x9b, 0xdb, 0x54, 0x35, 0x08, 0xa1, 0x0c, 0x28, 0x62, 0x70, 0xab, 0x4e, 0x5d,
-	0x88, 0x25, 0xca, 0xc6, 0x16, 0xc4, 0xc6, 0x42, 0x90, 0x58, 0xab, 0xd4, 0x3d, 0x24, 0xa6, 0xa9,
-	0x1d, 0xb9, 0x0e, 0x34, 0x1b, 0x8f, 0xc0, 0x63, 0xb0, 0xf1, 0x1a, 0x1d, 0x3b, 0x56, 0x0c, 0x15,
-	0x75, 0x17, 0xc6, 0x3e, 0x02, 0xaa, 0x09, 0x12, 0x23, 0x12, 0xdb, 0xf9, 0xe4, 0xe3, 0xcf, 0xbf,
-	0x7f, 0x7c, 0x95, 0x72, 0x9d, 0x95, 0xc3, 0x90, 0xc9, 0x09, 0x65, 0x52, 0x69, 0x98, 0x15, 0x4a,
-	0xde, 0x03, 0xd3, 0x35, 0xd1, 0x62, 0x9c, 0x52, 0x96, 0x95, 0x62, 0x4c, 0xa7, 0x5a, 0xaa, 0x24,
-	0x05, 0xca, 0x12, 0x96, 0x71, 0x91, 0x0e, 0xb8, 0x18, 0xc1, 0x6c, 0xc0, 0x72, 0x0e, 0x42, 0x87,
-	0x85, 0x92, 0x5a, 0xfa, 0xcd, 0x7a, 0xe7, 0xf8, 0xe4, 0x87, 0x36, 0x95, 0xa9, 0xa4, 0xf6, 0x7c,
-	0x58, 0xde, 0x59, 0xb2, 0x60, 0xa7, 0xaf, 0x7b, 0xdd, 0x57, 0x84, 0x1b, 0x97, 0x42, 0xab, 0xca,
-	0xbf, 0xc1, 0xde, 0x85, 0xcc, 0xcb, 0x89, 0x08, 0x50, 0x07, 0xf5, 0xf6, 0xa3, 0xf3, 0xf9, 0xaa,
-	0xed, 0xbc, 0xad, 0xda, 0xfd, 0xdf, 0xe4, 0x2c, 0x35, 0xcf, 0xe9, 0x23, 0x57, 0x10, 0x46, 0x95,
-	0x86, 0x69, 0xec, 0x31, 0xab, 0xf2, 0xaf, 0x71, 0xe3, 0x36, 0xc9, 0x4b, 0x08, 0xfe, 0xfd, 0xdd,
-	0xd9, 0x78, 0xd8, 0x99, 0xba, 0x80, 0x5b, 0x31, 0x24, 0xa3, 0x28, 0xd1, 0x2c, 0xf3, 0x43, 0xdc,
-	0x04, 0xa1, 0x15, 0x87, 0x69, 0x80, 0x3a, 0x6e, 0x6f, 0xef, 0xf4, 0x20, 0xac, 0x8b, 0x08, 0xed,
-	0xaf, 0xa2, 0xff, 0xbb, 0x17, 0xe3, 0xef, 0x25, 0xff, 0x10, 0xbb, 0x63, 0xa8, 0x6c, 0x9a, 0x56,
-	0xbc, 0x1b, 0xfd, 0x23, 0xec, 0xc1, 0xac, 0xe0, 0xaa, 0x0a, 0xdc, 0x0e, 0xea, 0xb9, 0x71, 0x4d,
-	0xd1, 0xd9, 0x62, 0x4d, 0x9c, 0xe5, 0x9a, 0x38, 0xdb, 0x35, 0x41, 0x4f, 0x86, 0xa0, 0x17, 0x43,
-	0xd0, 0xdc, 0x10, 0xb4, 0x30, 0x04, 0xbd, 0x1b, 0x82, 0x3e, 0x0c, 0x71, 0xb6, 0x86, 0xa0, 0xe7,
-	0x0d, 0x71, 0x16, 0x1b, 0xe2, 0x2c, 0x37, 0xc4, 0x19, 0x7a, 0xb6, 0xd5, 0xfe, 0x67, 0x00, 0x00,
-	0x00, 0xff, 0xff, 0x95, 0x6d, 0x6d, 0xd0, 0xdd, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
index 1c22c94c8ab51f74064a834eeadeba4c9cba4b9e..22a9d01ffaff4c192abd72cee356047abed813ed 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
@@ -8,8 +8,8 @@ option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 message Entry {
-    bytes Column = 1 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
-    bytes Value = 2 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
+    bytes Column = 1 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false];
+    bytes Value = 2 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false];
 }
 
 message ReadBatch {
@@ -18,4 +18,8 @@ message ReadBatch {
 
     // The time at which the key expires.
     int64 expiry = 3;
+
+    // The number of entries; used for cardinality limiting.
+    // entries will be empty when this is set.
+    int32 cardinality = 4;
 }
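A small self-contained sketch of the check this field enables in the caching index client; readBatch below is a simplified stand-in for the generated ReadBatch, not the vendored type:

package main

import "fmt"

// readBatch mimics the relevant fields: a cached stub keeps only the count,
// while a freshly read row still carries its entries.
type readBatch struct {
	Entries     []string
	Cardinality int32
}

func overLimit(b readBatch, limit int32) bool {
	if limit <= 0 {
		return false // per-tenant limit disabled
	}
	return b.Cardinality > limit || int32(len(b.Entries)) > limit
}

func main() {
	stub := readBatch{Cardinality: 200000} // entries were dropped before caching
	fmt.Println(overLimit(stub, 100000))   // true -> chunk.ErrCardinalityExceeded
}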
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
index d45ed09de02e765e8306b2801db4711c49e6635d..a0fd1b41406ac3c38ee9e4a0c580c30ef7cf2d75 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
@@ -98,7 +98,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf
 		if err != nil {
 			return nil, errors.Wrap(err, "error creating index client")
 		}
-		index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity)
+		index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity, limits)
 
 		objectStoreType := s.ObjectType
 		if objectStoreType == "" {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
index f177eefabf318270824af5a8301d6c74c398a765..f95d1e27877b1e8a78dfa8f05a186a56e7eb85be 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
@@ -1,11 +1,11 @@
 package client
 
 import (
-	"bytes"
 	stdjson "encoding/json"
 	"fmt"
 	"sort"
 	"strconv"
+	"strings"
 	"time"
 	"unsafe"
 
@@ -16,22 +16,6 @@ import (
 
 var json = jsoniter.ConfigCompatibleWithStandardLibrary
 
-// FromWriteRequest converts a WriteRequest proto into an array of samples.
-func FromWriteRequest(req *WriteRequest) []model.Sample {
-	// Just guess that there is one sample per timeseries
-	samples := make([]model.Sample, 0, len(req.Timeseries))
-	for _, ts := range req.Timeseries {
-		for _, s := range ts.Samples {
-			samples = append(samples, model.Sample{
-				Metric:    FromLabelPairs(ts.Labels),
-				Value:     model.SampleValue(s.Value),
-				Timestamp: model.Time(s.TimestampMs),
-			})
-		}
-	}
-	return samples
-}
-
 // ToWriteRequest converts an array of samples into a WriteRequest proto.
 func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *WriteRequest {
 	req := &WriteRequest{
@@ -42,7 +26,7 @@ func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *Wri
 	for _, s := range samples {
 		ts := PreallocTimeseries{
 			TimeSeries: TimeSeries{
-				Labels: ToLabelPairs(s.Metric),
+				Labels: FromMetricsToLabelAdapters(s.Metric),
 				Samples: []Sample{
 					{
 						Value:       float64(s.Value),
@@ -87,7 +71,7 @@ func ToQueryResponse(matrix model.Matrix) *QueryResponse {
 	resp := &QueryResponse{}
 	for _, ss := range matrix {
 		ts := TimeSeries{
-			Labels:  ToLabelPairs(ss.Metric),
+			Labels:  FromMetricsToLabelAdapters(ss.Metric),
 			Samples: make([]Sample, 0, len(ss.Values)),
 		}
 		for _, s := range ss.Values {
@@ -106,7 +90,7 @@ func FromQueryResponse(resp *QueryResponse) model.Matrix {
 	m := make(model.Matrix, 0, len(resp.Timeseries))
 	for _, ts := range resp.Timeseries {
 		var ss model.SampleStream
-		ss.Metric = FromLabelPairs(ts.Labels)
+		ss.Metric = FromLabelAdaptersToMetric(ts.Labels)
 		ss.Values = make([]model.SamplePair, 0, len(ts.Samples))
 		for _, s := range ts.Samples {
 			ss.Values = append(ss.Values, model.SamplePair{
@@ -153,7 +137,7 @@ func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (mo
 func FromMetricsForLabelMatchersResponse(resp *MetricsForLabelMatchersResponse) []model.Metric {
 	metrics := []model.Metric{}
 	for _, m := range resp.Metric {
-		metrics = append(metrics, FromLabelPairs(m.Labels))
+		metrics = append(metrics, FromLabelAdaptersToMetric(m.Labels))
 	}
 	return metrics
 }
@@ -208,70 +192,63 @@ func fromLabelMatchers(matchers []*LabelMatcher) ([]*labels.Matcher, error) {
 	return result, nil
 }
 
-// ToLabelPairs builds a []LabelPair from a model.Metric
-func ToLabelPairs(metric model.Metric) []LabelPair {
-	labelPairs := make([]LabelPair, 0, len(metric))
-	for k, v := range metric {
-		labelPairs = append(labelPairs, LabelPair{
-			Name:  []byte(k),
-			Value: []byte(v),
-		})
-	}
-	sort.Sort(byLabel(labelPairs)) // The labels should be sorted upon initialisation.
-	return labelPairs
+// FromLabelAdaptersToLabels casts []LabelAdapter to labels.Labels.
+// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
+// This allows us to use labels.Labels directly in protos.
+func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels {
+	return *(*labels.Labels)(unsafe.Pointer(&ls))
 }
 
-type byLabel []LabelPair
-
-func (s byLabel) Len() int           { return len(s) }
-func (s byLabel) Less(i, j int) bool { return bytes.Compare(s[i].Name, s[j].Name) < 0 }
-func (s byLabel) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-// FromLabelPairs unpack a []LabelPair to a model.Metric
-func FromLabelPairs(labelPairs []LabelPair) model.Metric {
-	metric := make(model.Metric, len(labelPairs))
-	for _, l := range labelPairs {
-		metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
-	}
-	return metric
+// FromLabelsToLabelAdapaters casts labels.Labels to []LabelAdapter.
+// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
+// This allows us to use labels.Labels directly in protos.
+func FromLabelsToLabelAdapaters(ls labels.Labels) []LabelAdapter {
+	return *(*[]LabelAdapter)(unsafe.Pointer(&ls))
 }
 
-// FromLabelPairsToLabels unpack a []LabelPair to a labels.Labels
-func FromLabelPairsToLabels(labelPairs []LabelPair) labels.Labels {
-	ls := make(labels.Labels, 0, len(labelPairs))
-	for _, l := range labelPairs {
-		ls = append(ls, labels.Label{
-			Name:  string(l.Name),
-			Value: string(l.Value),
-		})
+// FromLabelAdaptersToMetric converts []LabelAdapter to a model.Metric.
+// Don't do this on any performance sensitive paths.
+func FromLabelAdaptersToMetric(ls []LabelAdapter) model.Metric {
+	result := make(model.Metric, len(ls))
+	for _, l := range ls {
+		result[model.LabelName(l.Name)] = model.LabelValue(l.Value)
 	}
-	return ls
+	return result
 }
 
-// FromLabelsToLabelPairs converts labels.Labels to []LabelPair
-func FromLabelsToLabelPairs(s labels.Labels) []LabelPair {
-	labelPairs := make([]LabelPair, 0, len(s))
-	for _, v := range s {
-		labelPairs = append(labelPairs, LabelPair{
-			Name:  []byte(v.Name),
-			Value: []byte(v.Value),
+// FromMetricsToLabelAdapters converts model.Metric to []LabelAdapter.
+// Don't do this on any performance sensitive paths.
+// The result is sorted.
+func FromMetricsToLabelAdapters(metric model.Metric) []LabelAdapter {
+	result := make([]LabelAdapter, 0, len(metric))
+	for k, v := range metric {
+		result = append(result, LabelAdapter{
+			Name:  string(k),
+			Value: string(v),
 		})
 	}
-	return labelPairs // note already sorted
+	sort.Sort(byLabel(result)) // The labels should be sorted upon initialisation.
+	return result
 }
 
+type byLabel []LabelAdapter
+
+func (s byLabel) Len() int           { return len(s) }
+func (s byLabel) Less(i, j int) bool { return strings.Compare(s[i].Name, s[j].Name) < 0 }
+func (s byLabel) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
 // FastFingerprint runs the same algorithm as Prometheus labelSetToFastFingerprint()
-func FastFingerprint(labelPairs []LabelPair) model.Fingerprint {
-	if len(labelPairs) == 0 {
+func FastFingerprint(ls []LabelAdapter) model.Fingerprint {
+	if len(ls) == 0 {
 		return model.Metric(nil).FastFingerprint()
 	}
 
 	var result uint64
-	for _, pair := range labelPairs {
+	for _, l := range ls {
 		sum := hashNew()
-		sum = hashAdd(sum, pair.Name)
+		sum = hashAdd(sum, l.Name)
 		sum = hashAddByte(sum, model.SeparatorByte)
-		sum = hashAdd(sum, pair.Value)
+		sum = hashAdd(sum, l.Value)
 		result ^= sum
 	}
 	return model.Fingerprint(result)
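The zero-copy casts above are sound only because LabelAdapter and labels.Label share an identical layout. A minimal stand-alone illustration of the pattern, using local stand-in types rather than the vendored ones:

package main

import (
	"fmt"
	"unsafe"
)

type label struct{ Name, Value string }        // stand-in for labels.Label
type labelAdapter struct{ Name, Value string } // same fields, same order, same layout

// adaptersToLabels reinterprets the slice header in place; no elements are copied.
func adaptersToLabels(ls []labelAdapter) []label {
	return *(*[]label)(unsafe.Pointer(&ls))
}

func main() {
	adapters := []labelAdapter{{Name: "app", Value: "loki"}}
	fmt.Println(adaptersToLabels(adapters)[0].Value) // "loki", no copy made
}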
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
index a7d5490231d1a7a5d690421e0c54540b5daf7ac5..b7a9689fb025bd1ccb9682048d042f99ee82d812 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
@@ -3,29 +3,21 @@
 
 package client
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import github_com_cortexproject_cortex_pkg_util_wire "github.com/cortexproject/cortex/pkg/util/wire"
-
-import strconv "strconv"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
 import (
-	context "golang.org/x/net/context"
+	bytes "bytes"
+	context "context"
+	encoding_binary "encoding/binary"
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
 	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strconv "strconv"
+	strings "strings"
 )
 
-import encoding_binary "encoding/binary"
-
-import io "io"
-
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
 var _ = fmt.Errorf
@@ -52,6 +44,7 @@ var MatchType_name = map[int32]string{
 	2: "REGEX_MATCH",
 	3: "REGEX_NO_MATCH",
 }
+
 var MatchType_value = map[string]int32{
 	"EQUAL":          0,
 	"NOT_EQUAL":      1,
@@ -60,7 +53,7 @@ var MatchType_value = map[string]int32{
 }
 
 func (MatchType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{0}
+	return fileDescriptor_db0f8a1e534b119a, []int{0}
 }
 
 type WriteRequest_SourceEnum int32
@@ -74,24 +67,25 @@ var WriteRequest_SourceEnum_name = map[int32]string{
 	0: "API",
 	1: "RULE",
 }
+
 var WriteRequest_SourceEnum_value = map[string]int32{
 	"API":  0,
 	"RULE": 1,
 }
 
 func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{0, 0}
+	return fileDescriptor_db0f8a1e534b119a, []int{0, 0}
 }
 
 type WriteRequest struct {
-	Timeseries []PreallocTimeseries    `protobuf:"bytes,1,rep,name=timeseries,customtype=PreallocTimeseries" json:"timeseries"`
+	Timeseries []PreallocTimeseries    `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"`
 	Source     WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"`
 }
 
 func (m *WriteRequest) Reset()      { *m = WriteRequest{} }
 func (*WriteRequest) ProtoMessage() {}
 func (*WriteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{0}
+	return fileDescriptor_db0f8a1e534b119a, []int{0}
 }
 func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -108,8 +102,8 @@ func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
 		return b[:n], nil
 	}
 }
-func (dst *WriteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WriteRequest.Merge(dst, src)
+func (m *WriteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteRequest.Merge(m, src)
 }
 func (m *WriteRequest) XXX_Size() int {
 	return m.Size()
@@ -133,7 +127,7 @@ type WriteResponse struct {
 func (m *WriteResponse) Reset()      { *m = WriteResponse{} }
 func (*WriteResponse) ProtoMessage() {}
 func (*WriteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{1}
+	return fileDescriptor_db0f8a1e534b119a, []int{1}
 }
 func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -150,8 +144,8 @@ func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
 		return b[:n], nil
 	}
 }
-func (dst *WriteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WriteResponse.Merge(dst, src)
+func (m *WriteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WriteResponse.Merge(m, src)
 }
 func (m *WriteResponse) XXX_Size() int {
 	return m.Size()
@@ -163,13 +157,13 @@ func (m *WriteResponse) XXX_DiscardUnknown() {
 var xxx_messageInfo_WriteResponse proto.InternalMessageInfo
 
 type ReadRequest struct {
-	Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"`
+	Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
 }
 
 func (m *ReadRequest) Reset()      { *m = ReadRequest{} }
 func (*ReadRequest) ProtoMessage() {}
 func (*ReadRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{2}
+	return fileDescriptor_db0f8a1e534b119a, []int{2}
 }
 func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -186,8 +180,8 @@ func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
 		return b[:n], nil
 	}
 }
-func (dst *ReadRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadRequest.Merge(dst, src)
+func (m *ReadRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadRequest.Merge(m, src)
 }
 func (m *ReadRequest) XXX_Size() int {
 	return m.Size()
@@ -206,13 +200,13 @@ func (m *ReadRequest) GetQueries() []*QueryRequest {
 }
 
 type ReadResponse struct {
-	Results []*QueryResponse `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
+	Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
 }
 
 func (m *ReadResponse) Reset()      { *m = ReadResponse{} }
 func (*ReadResponse) ProtoMessage() {}
 func (*ReadResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{3}
+	return fileDescriptor_db0f8a1e534b119a, []int{3}
 }
 func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -229,8 +223,8 @@ func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
 		return b[:n], nil
 	}
 }
-func (dst *ReadResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReadResponse.Merge(dst, src)
+func (m *ReadResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReadResponse.Merge(m, src)
 }
 func (m *ReadResponse) XXX_Size() int {
 	return m.Size()
@@ -251,13 +245,13 @@ func (m *ReadResponse) GetResults() []*QueryResponse {
 type QueryRequest struct {
 	StartTimestampMs int64           `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
 	EndTimestampMs   int64           `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
-	Matchers         []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
+	Matchers         []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"`
 }
 
 func (m *QueryRequest) Reset()      { *m = QueryRequest{} }
 func (*QueryRequest) ProtoMessage() {}
 func (*QueryRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{4}
+	return fileDescriptor_db0f8a1e534b119a, []int{4}
 }
 func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -274,8 +268,8 @@ func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
 		return b[:n], nil
 	}
 }
-func (dst *QueryRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_QueryRequest.Merge(dst, src)
+func (m *QueryRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QueryRequest.Merge(m, src)
 }
 func (m *QueryRequest) XXX_Size() int {
 	return m.Size()
@@ -308,13 +302,13 @@ func (m *QueryRequest) GetMatchers() []*LabelMatcher {
 }
 
 type QueryResponse struct {
-	Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"`
+	Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
 }
 
 func (m *QueryResponse) Reset()      { *m = QueryResponse{} }
 func (*QueryResponse) ProtoMessage() {}
 func (*QueryResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{5}
+	return fileDescriptor_db0f8a1e534b119a, []int{5}
 }
 func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -331,8 +325,8 @@ func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
 		return b[:n], nil
 	}
 }
-func (dst *QueryResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_QueryResponse.Merge(dst, src)
+func (m *QueryResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QueryResponse.Merge(m, src)
 }
 func (m *QueryResponse) XXX_Size() int {
 	return m.Size()
@@ -352,13 +346,13 @@ func (m *QueryResponse) GetTimeseries() []TimeSeries {
 
 // QueryStreamResponse contains a batch of timeseries chunks.
 type QueryStreamResponse struct {
-	Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"`
+	Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
 }
 
 func (m *QueryStreamResponse) Reset()      { *m = QueryStreamResponse{} }
 func (*QueryStreamResponse) ProtoMessage() {}
 func (*QueryStreamResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{6}
+	return fileDescriptor_db0f8a1e534b119a, []int{6}
 }
 func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -375,8 +369,8 @@ func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
 		return b[:n], nil
 	}
 }
-func (dst *QueryStreamResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_QueryStreamResponse.Merge(dst, src)
+func (m *QueryStreamResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QueryStreamResponse.Merge(m, src)
 }
 func (m *QueryStreamResponse) XXX_Size() int {
 	return m.Size()
@@ -401,7 +395,7 @@ type LabelValuesRequest struct {
 func (m *LabelValuesRequest) Reset()      { *m = LabelValuesRequest{} }
 func (*LabelValuesRequest) ProtoMessage() {}
 func (*LabelValuesRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{7}
+	return fileDescriptor_db0f8a1e534b119a, []int{7}
 }
 func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -418,8 +412,8 @@ func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
 		return b[:n], nil
 	}
 }
-func (dst *LabelValuesRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelValuesRequest.Merge(dst, src)
+func (m *LabelValuesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelValuesRequest.Merge(m, src)
 }
 func (m *LabelValuesRequest) XXX_Size() int {
 	return m.Size()
@@ -438,13 +432,13 @@ func (m *LabelValuesRequest) GetLabelName() string {
 }
 
 type LabelValuesResponse struct {
-	LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
+	LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
 }
 
 func (m *LabelValuesResponse) Reset()      { *m = LabelValuesResponse{} }
 func (*LabelValuesResponse) ProtoMessage() {}
 func (*LabelValuesResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{8}
+	return fileDescriptor_db0f8a1e534b119a, []int{8}
 }
 func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -461,8 +455,8 @@ func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
 		return b[:n], nil
 	}
 }
-func (dst *LabelValuesResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelValuesResponse.Merge(dst, src)
+func (m *LabelValuesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelValuesResponse.Merge(m, src)
 }
 func (m *LabelValuesResponse) XXX_Size() int {
 	return m.Size()
@@ -486,7 +480,7 @@ type LabelNamesRequest struct {
 func (m *LabelNamesRequest) Reset()      { *m = LabelNamesRequest{} }
 func (*LabelNamesRequest) ProtoMessage() {}
 func (*LabelNamesRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{9}
+	return fileDescriptor_db0f8a1e534b119a, []int{9}
 }
 func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -503,8 +497,8 @@ func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
 		return b[:n], nil
 	}
 }
-func (dst *LabelNamesRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelNamesRequest.Merge(dst, src)
+func (m *LabelNamesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelNamesRequest.Merge(m, src)
 }
 func (m *LabelNamesRequest) XXX_Size() int {
 	return m.Size()
@@ -516,13 +510,13 @@ func (m *LabelNamesRequest) XXX_DiscardUnknown() {
 var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo
 
 type LabelNamesResponse struct {
-	LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames" json:"label_names,omitempty"`
+	LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"`
 }
 
 func (m *LabelNamesResponse) Reset()      { *m = LabelNamesResponse{} }
 func (*LabelNamesResponse) ProtoMessage() {}
 func (*LabelNamesResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{10}
+	return fileDescriptor_db0f8a1e534b119a, []int{10}
 }
 func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -539,8 +533,8 @@ func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
 		return b[:n], nil
 	}
 }
-func (dst *LabelNamesResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelNamesResponse.Merge(dst, src)
+func (m *LabelNamesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelNamesResponse.Merge(m, src)
 }
 func (m *LabelNamesResponse) XXX_Size() int {
 	return m.Size()
@@ -564,7 +558,7 @@ type UserStatsRequest struct {
 func (m *UserStatsRequest) Reset()      { *m = UserStatsRequest{} }
 func (*UserStatsRequest) ProtoMessage() {}
 func (*UserStatsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{11}
+	return fileDescriptor_db0f8a1e534b119a, []int{11}
 }
 func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -581,8 +575,8 @@ func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
 		return b[:n], nil
 	}
 }
-func (dst *UserStatsRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UserStatsRequest.Merge(dst, src)
+func (m *UserStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserStatsRequest.Merge(m, src)
 }
 func (m *UserStatsRequest) XXX_Size() int {
 	return m.Size()
@@ -603,7 +597,7 @@ type UserStatsResponse struct {
 func (m *UserStatsResponse) Reset()      { *m = UserStatsResponse{} }
 func (*UserStatsResponse) ProtoMessage() {}
 func (*UserStatsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{12}
+	return fileDescriptor_db0f8a1e534b119a, []int{12}
 }
 func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -620,8 +614,8 @@ func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
 		return b[:n], nil
 	}
 }
-func (dst *UserStatsResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UserStatsResponse.Merge(dst, src)
+func (m *UserStatsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserStatsResponse.Merge(m, src)
 }
 func (m *UserStatsResponse) XXX_Size() int {
 	return m.Size()
@@ -662,13 +656,13 @@ func (m *UserStatsResponse) GetRuleIngestionRate() float64 {
 
 type UserIDStatsResponse struct {
 	UserId string             `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
-	Data   *UserStatsResponse `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+	Data   *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
 }
 
 func (m *UserIDStatsResponse) Reset()      { *m = UserIDStatsResponse{} }
 func (*UserIDStatsResponse) ProtoMessage() {}
 func (*UserIDStatsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{13}
+	return fileDescriptor_db0f8a1e534b119a, []int{13}
 }
 func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -685,8 +679,8 @@ func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
 		return b[:n], nil
 	}
 }
-func (dst *UserIDStatsResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UserIDStatsResponse.Merge(dst, src)
+func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserIDStatsResponse.Merge(m, src)
 }
 func (m *UserIDStatsResponse) XXX_Size() int {
 	return m.Size()
@@ -712,13 +706,13 @@ func (m *UserIDStatsResponse) GetData() *UserStatsResponse {
 }
 
 type UsersStatsResponse struct {
-	Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats" json:"stats,omitempty"`
+	Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"`
 }
 
 func (m *UsersStatsResponse) Reset()      { *m = UsersStatsResponse{} }
 func (*UsersStatsResponse) ProtoMessage() {}
 func (*UsersStatsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{14}
+	return fileDescriptor_db0f8a1e534b119a, []int{14}
 }
 func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -735,8 +729,8 @@ func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
 		return b[:n], nil
 	}
 }
-func (dst *UsersStatsResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_UsersStatsResponse.Merge(dst, src)
+func (m *UsersStatsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UsersStatsResponse.Merge(m, src)
 }
 func (m *UsersStatsResponse) XXX_Size() int {
 	return m.Size()
@@ -757,13 +751,13 @@ func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse {
 type MetricsForLabelMatchersRequest struct {
 	StartTimestampMs int64            `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
 	EndTimestampMs   int64            `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
-	MatchersSet      []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet" json:"matchers_set,omitempty"`
+	MatchersSet      []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"`
 }
 
 func (m *MetricsForLabelMatchersRequest) Reset()      { *m = MetricsForLabelMatchersRequest{} }
 func (*MetricsForLabelMatchersRequest) ProtoMessage() {}
 func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{15}
+	return fileDescriptor_db0f8a1e534b119a, []int{15}
 }
 func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -780,8 +774,8 @@ func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic boo
 		return b[:n], nil
 	}
 }
-func (dst *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(dst, src)
+func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src)
 }
 func (m *MetricsForLabelMatchersRequest) XXX_Size() int {
 	return m.Size()
@@ -814,13 +808,13 @@ func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers {
 }
 
 type MetricsForLabelMatchersResponse struct {
-	Metric []*Metric `protobuf:"bytes,1,rep,name=metric" json:"metric,omitempty"`
+	Metric []*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"`
 }
 
 func (m *MetricsForLabelMatchersResponse) Reset()      { *m = MetricsForLabelMatchersResponse{} }
 func (*MetricsForLabelMatchersResponse) ProtoMessage() {}
 func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{16}
+	return fileDescriptor_db0f8a1e534b119a, []int{16}
 }
 func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -837,8 +831,8 @@ func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bo
 		return b[:n], nil
 	}
 }
-func (dst *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(dst, src)
+func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src)
 }
 func (m *MetricsForLabelMatchersResponse) XXX_Size() int {
 	return m.Size()
@@ -857,16 +851,16 @@ func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric {
 }
 
 type TimeSeriesChunk struct {
-	FromIngesterId string      `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"`
-	UserId         string      `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
-	Labels         []LabelPair `protobuf:"bytes,3,rep,name=labels" json:"labels"`
-	Chunks         []Chunk     `protobuf:"bytes,4,rep,name=chunks" json:"chunks"`
+	FromIngesterId string         `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"`
+	UserId         string         `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+	Labels         []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
+	Chunks         []Chunk        `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"`
 }
 
 func (m *TimeSeriesChunk) Reset()      { *m = TimeSeriesChunk{} }
 func (*TimeSeriesChunk) ProtoMessage() {}
 func (*TimeSeriesChunk) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{17}
+	return fileDescriptor_db0f8a1e534b119a, []int{17}
 }
 func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -883,8 +877,8 @@ func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
 		return b[:n], nil
 	}
 }
-func (dst *TimeSeriesChunk) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TimeSeriesChunk.Merge(dst, src)
+func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TimeSeriesChunk.Merge(m, src)
 }
 func (m *TimeSeriesChunk) XXX_Size() int {
 	return m.Size()
@@ -909,13 +903,6 @@ func (m *TimeSeriesChunk) GetUserId() string {
 	return ""
 }
 
-func (m *TimeSeriesChunk) GetLabels() []LabelPair {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
 func (m *TimeSeriesChunk) GetChunks() []Chunk {
 	if m != nil {
 		return m.Chunks
@@ -933,7 +920,7 @@ type Chunk struct {
 func (m *Chunk) Reset()      { *m = Chunk{} }
 func (*Chunk) ProtoMessage() {}
 func (*Chunk) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{18}
+	return fileDescriptor_db0f8a1e534b119a, []int{18}
 }
 func (m *Chunk) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -950,8 +937,8 @@ func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *Chunk) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Chunk.Merge(dst, src)
+func (m *Chunk) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Chunk.Merge(m, src)
 }
 func (m *Chunk) XXX_Size() int {
 	return m.Size()
@@ -996,7 +983,7 @@ type TransferChunksResponse struct {
 func (m *TransferChunksResponse) Reset()      { *m = TransferChunksResponse{} }
 func (*TransferChunksResponse) ProtoMessage() {}
 func (*TransferChunksResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{19}
+	return fileDescriptor_db0f8a1e534b119a, []int{19}
 }
 func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1013,8 +1000,8 @@ func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]by
 		return b[:n], nil
 	}
 }
-func (dst *TransferChunksResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TransferChunksResponse.Merge(dst, src)
+func (m *TransferChunksResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TransferChunksResponse.Merge(m, src)
 }
 func (m *TransferChunksResponse) XXX_Size() int {
 	return m.Size()
@@ -1026,15 +1013,15 @@ func (m *TransferChunksResponse) XXX_DiscardUnknown() {
 var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo
 
 type TimeSeries struct {
-	Labels []LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels"`
+	Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
 	// Sorted by time, oldest sample first.
-	Samples []Sample `protobuf:"bytes,2,rep,name=samples" json:"samples"`
+	Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
 }
 
 func (m *TimeSeries) Reset()      { *m = TimeSeries{} }
 func (*TimeSeries) ProtoMessage() {}
 func (*TimeSeries) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{20}
+	return fileDescriptor_db0f8a1e534b119a, []int{20}
 }
 func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1051,8 +1038,8 @@ func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *TimeSeries) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TimeSeries.Merge(dst, src)
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TimeSeries.Merge(m, src)
 }
 func (m *TimeSeries) XXX_Size() int {
 	return m.Size()
@@ -1063,13 +1050,6 @@ func (m *TimeSeries) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
 
-func (m *TimeSeries) GetLabels() []LabelPair {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
 func (m *TimeSeries) GetSamples() []Sample {
 	if m != nil {
 		return m.Samples
@@ -1078,14 +1058,14 @@ func (m *TimeSeries) GetSamples() []Sample {
 }
 
 type LabelPair struct {
-	Name  github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,1,opt,name=name,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"name"`
-	Value github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,2,opt,name=value,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"value"`
+	Name  []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
 }
 
 func (m *LabelPair) Reset()      { *m = LabelPair{} }
 func (*LabelPair) ProtoMessage() {}
 func (*LabelPair) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{21}
+	return fileDescriptor_db0f8a1e534b119a, []int{21}
 }
 func (m *LabelPair) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1102,8 +1082,8 @@ func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *LabelPair) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelPair.Merge(dst, src)
+func (m *LabelPair) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelPair.Merge(m, src)
 }
 func (m *LabelPair) XXX_Size() int {
 	return m.Size()
@@ -1114,6 +1094,20 @@ func (m *LabelPair) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_LabelPair proto.InternalMessageInfo
 
+func (m *LabelPair) GetName() []byte {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *LabelPair) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
 type Sample struct {
 	Value       float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
 	TimestampMs int64   `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
@@ -1122,7 +1116,7 @@ type Sample struct {
 func (m *Sample) Reset()      { *m = Sample{} }
 func (*Sample) ProtoMessage() {}
 func (*Sample) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{22}
+	return fileDescriptor_db0f8a1e534b119a, []int{22}
 }
 func (m *Sample) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1139,8 +1133,8 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *Sample) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Sample.Merge(dst, src)
+func (m *Sample) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Sample.Merge(m, src)
 }
 func (m *Sample) XXX_Size() int {
 	return m.Size()
@@ -1166,13 +1160,13 @@ func (m *Sample) GetTimestampMs() int64 {
 }
 
 type LabelMatchers struct {
-	Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"`
+	Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"`
 }
 
 func (m *LabelMatchers) Reset()      { *m = LabelMatchers{} }
 func (*LabelMatchers) ProtoMessage() {}
 func (*LabelMatchers) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{23}
+	return fileDescriptor_db0f8a1e534b119a, []int{23}
 }
 func (m *LabelMatchers) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1189,8 +1183,8 @@ func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
 		return b[:n], nil
 	}
 }
-func (dst *LabelMatchers) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelMatchers.Merge(dst, src)
+func (m *LabelMatchers) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelMatchers.Merge(m, src)
 }
 func (m *LabelMatchers) XXX_Size() int {
 	return m.Size()
@@ -1209,13 +1203,13 @@ func (m *LabelMatchers) GetMatchers() []*LabelMatcher {
 }
 
 type Metric struct {
-	Labels []LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels"`
+	Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
 }
 
 func (m *Metric) Reset()      { *m = Metric{} }
 func (*Metric) ProtoMessage() {}
 func (*Metric) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{24}
+	return fileDescriptor_db0f8a1e534b119a, []int{24}
 }
 func (m *Metric) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1232,8 +1226,8 @@ func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *Metric) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metric.Merge(dst, src)
+func (m *Metric) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metric.Merge(m, src)
 }
 func (m *Metric) XXX_Size() int {
 	return m.Size()
@@ -1244,13 +1238,6 @@ func (m *Metric) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_Metric proto.InternalMessageInfo
 
-func (m *Metric) GetLabels() []LabelPair {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
 type LabelMatcher struct {
 	Type  MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" json:"type,omitempty"`
 	Name  string    `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
@@ -1260,7 +1247,7 @@ type LabelMatcher struct {
 func (m *LabelMatcher) Reset()      { *m = LabelMatcher{} }
 func (*LabelMatcher) ProtoMessage() {}
 func (*LabelMatcher) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cortex_dc30309a17c87a98, []int{25}
+	return fileDescriptor_db0f8a1e534b119a, []int{25}
 }
 func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1277,8 +1264,8 @@ func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
 		return b[:n], nil
 	}
 }
-func (dst *LabelMatcher) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LabelMatcher.Merge(dst, src)
+func (m *LabelMatcher) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelMatcher.Merge(m, src)
 }
 func (m *LabelMatcher) XXX_Size() int {
 	return m.Size()
@@ -1311,6 +1298,8 @@ func (m *LabelMatcher) GetValue() string {
 }
 
 func init() {
+	proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
+	proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
 	proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest")
 	proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse")
 	proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
@@ -1337,9 +1326,93 @@ func init() {
 	proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers")
 	proto.RegisterType((*Metric)(nil), "cortex.Metric")
 	proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
-	proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
-	proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
 }
+
+func init() {
+	proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_db0f8a1e534b119a)
+}
+
+var fileDescriptor_db0f8a1e534b119a = []byte{
+	// 1231 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
+	0x17, 0xdf, 0x8d, 0x7f, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0x92, 0x7e, 0x9b, 0xba, 0xfa, 0x6e, 0xca,
+	0x48, 0x2d, 0x11, 0x50, 0xa7, 0xa4, 0x2a, 0xf4, 0x40, 0x55, 0x9c, 0x36, 0x6d, 0x8d, 0x92, 0x34,
+	0x1d, 0xbb, 0x80, 0x90, 0xd0, 0x6a, 0x63, 0x4f, 0x9d, 0xa5, 0xfb, 0xc3, 0x9d, 0x99, 0x45, 0xf4,
+	0x80, 0xc4, 0x7f, 0x00, 0x47, 0xf8, 0x0f, 0x38, 0x73, 0x81, 0x33, 0xa7, 0x1e, 0x7b, 0xac, 0x38,
+	0x54, 0xd4, 0xbd, 0x70, 0xec, 0x9f, 0x80, 0x76, 0x66, 0x76, 0xbd, 0xeb, 0xda, 0xa2, 0x02, 0xf5,
+	0xe6, 0x79, 0xef, 0xf3, 0x3e, 0xf3, 0xe6, 0xfd, 0x5c, 0xc3, 0xc7, 0x43, 0x57, 0x1c, 0x47, 0x47,
+	0xad, 0x7e, 0xe8, 0x6f, 0xf5, 0x43, 0x26, 0xe8, 0x37, 0x23, 0x16, 0x7e, 0x45, 0xfb, 0x42, 0x9f,
+	0xb6, 0x46, 0x0f, 0x86, 0x5b, 0x6e, 0x30, 0xa4, 0x5c, 0x50, 0xb6, 0xd5, 0xf7, 0x5c, 0x1a, 0x24,
+	0xaa, 0xd6, 0x88, 0x85, 0x22, 0x44, 0x65, 0x75, 0x6a, 0x5e, 0xc8, 0x30, 0x0d, 0xc3, 0x61, 0xb8,
+	0x25, 0xd5, 0x47, 0xd1, 0x7d, 0x79, 0x92, 0x07, 0xf9, 0x4b, 0x99, 0xe1, 0xdf, 0x4c, 0xa8, 0x7d,
+	0xc6, 0x5c, 0x41, 0x09, 0x7d, 0x18, 0x51, 0x2e, 0xd0, 0x01, 0x80, 0x70, 0x7d, 0xca, 0x29, 0x73,
+	0x29, 0x5f, 0x37, 0xcf, 0x16, 0x36, 0xab, 0xdb, 0xa8, 0xa5, 0xaf, 0xea, 0xb9, 0x3e, 0xed, 0x4a,
+	0xcd, 0x4e, 0xf3, 0xf1, 0xb3, 0x0d, 0xe3, 0x8f, 0x67, 0x1b, 0xe8, 0x90, 0x51, 0xc7, 0xf3, 0xc2,
+	0x7e, 0x2f, 0xb5, 0x22, 0x19, 0x06, 0xf4, 0x21, 0x94, 0xbb, 0x61, 0xc4, 0xfa, 0x74, 0x7d, 0xe1,
+	0xac, 0xb9, 0x59, 0xdf, 0xde, 0x48, 0xb8, 0xb2, 0xb7, 0xb6, 0x14, 0x64, 0x37, 0x88, 0x7c, 0x52,
+	0xe6, 0xf2, 0x37, 0xde, 0x00, 0x98, 0x48, 0xd1, 0x22, 0x14, 0xda, 0x87, 0x9d, 0x86, 0x81, 0x96,
+	0xa0, 0x48, 0xee, 0xed, 0xed, 0x36, 0x4c, 0x7c, 0x02, 0x96, 0x35, 0x07, 0x1f, 0x85, 0x01, 0xa7,
+	0xf8, 0x2a, 0x54, 0x09, 0x75, 0x06, 0xc9, 0x4b, 0x5a, 0xb0, 0xf8, 0x30, 0xca, 0x3e, 0x63, 0x2d,
+	0xb9, 0xfa, 0x6e, 0x44, 0xd9, 0x23, 0x0d, 0x23, 0x09, 0x08, 0x5f, 0x83, 0x9a, 0x32, 0x57, 0x74,
+	0x68, 0x0b, 0x16, 0x19, 0xe5, 0x91, 0x27, 0x12, 0xfb, 0x93, 0x53, 0xf6, 0x0a, 0x47, 0x12, 0x14,
+	0xfe, 0xd1, 0x84, 0x5a, 0x96, 0x1a, 0xbd, 0x07, 0x88, 0x0b, 0x87, 0x09, 0x5b, 0xc6, 0x43, 0x38,
+	0xfe, 0xc8, 0xf6, 0x63, 0x32, 0x73, 0xb3, 0x40, 0x1a, 0x52, 0xd3, 0x4b, 0x14, 0xfb, 0x1c, 0x6d,
+	0x42, 0x83, 0x06, 0x83, 0x3c, 0x76, 0x41, 0x62, 0xeb, 0x34, 0x18, 0x64, 0x91, 0x17, 0x61, 0xc9,
+	0x77, 0x44, 0xff, 0x98, 0x32, 0xbe, 0x5e, 0xc8, 0x3f, 0x6d, 0xcf, 0x39, 0xa2, 0xde, 0xbe, 0x52,
+	0x92, 0x14, 0x85, 0x3b, 0xb0, 0x9c, 0x73, 0x1a, 0x5d, 0x79, 0xcd, 0x34, 0x17, 0xe3, 0x34, 0x67,
+	0x13, 0x8a, 0x7b, 0xb0, 0x2a, 0xa9, 0xba, 0x82, 0x51, 0xc7, 0x4f, 0x09, 0xaf, 0xce, 0x20, 0x3c,
+	0xf5, 0x2a, 0xe1, 0xf5, 0xe3, 0x28, 0x78, 0x30, 0x83, 0xf5, 0x12, 0x20, 0xe9, 0xfa, 0xa7, 0x8e,
+	0x17, 0x51, 0x9e, 0x04, 0xf0, 0xff, 0x00, 0x5e, 0x2c, 0xb5, 0x03, 0xc7, 0xa7, 0x32, 0x70, 0x15,
+	0x52, 0x91, 0x92, 0x03, 0xc7, 0xa7, 0xf8, 0x0a, 0xac, 0xe6, 0x8c, 0xb4, 0x2b, 0x6f, 0x41, 0x4d,
+	0x59, 0x7d, 0x2d, 0xe5, 0xd2, 0x99, 0x0a, 0xa9, 0x7a, 0x13, 0x28, 0x5e, 0x85, 0x95, 0xbd, 0x84,
+	0x26, 0xb9, 0x0d, 0x5f, 0xd6, 0x3e, 0x68, 0xa1, 0x66, 0xdb, 0x80, 0xea, 0xc4, 0x87, 0x84, 0x0c,
+	0x52, 0x27, 0x38, 0x46, 0xd0, 0xb8, 0xc7, 0x29, 0xeb, 0x0a, 0x47, 0xa4, 0x54, 0xbf, 0x9a, 0xb0,
+	0x92, 0x11, 0x6a, 0xaa, 0x73, 0x50, 0x57, 0x3d, 0xec, 0x86, 0x81, 0xcd, 0x1c, 0xa1, 0x9e, 0x64,
+	0x92, 0xe5, 0x54, 0x4a, 0x1c, 0x41, 0xe3, 0x57, 0x07, 0x91, 0x6f, 0xeb, 0x50, 0xc6, 0x25, 0x50,
+	0x24, 0x95, 0x20, 0xf2, 0x55, 0x04, 0xe3, 0xaa, 0x72, 0x46, 0xae, 0x3d, 0xc5, 0x54, 0x90, 0x4c,
+	0x0d, 0x67, 0xe4, 0x76, 0x72, 0x64, 0x2d, 0x58, 0x65, 0x91, 0x47, 0xa7, 0xe1, 0x45, 0x09, 0x5f,
+	0x89, 0x55, 0x39, 0x3c, 0xfe, 0x12, 0x56, 0x63, 0xc7, 0x3b, 0x37, 0xf2, 0xae, 0x9f, 0x82, 0xc5,
+	0x88, 0x53, 0x66, 0xbb, 0x03, 0x9d, 0x86, 0x72, 0x7c, 0xec, 0x0c, 0xd0, 0x05, 0x28, 0x0e, 0x1c,
+	0xe1, 0x48, 0x37, 0xab, 0xdb, 0xa7, 0x93, 0x8c, 0xbf, 0xf2, 0x78, 0x22, 0x61, 0xf8, 0x16, 0xa0,
+	0x58, 0xc5, 0xf3, 0xec, 0xef, 0x43, 0x89, 0xc7, 0x02, 0x5d, 0x37, 0x67, 0xb2, 0x2c, 0x53, 0x9e,
+	0x10, 0x85, 0xc4, 0xbf, 0x98, 0x60, 0xed, 0x53, 0xc1, 0xdc, 0x3e, 0xbf, 0x19, 0xb2, 0x6c, 0xd9,
+	0xf3, 0x37, 0xdd, 0x7e, 0x57, 0xa0, 0x96, 0x34, 0x96, 0xcd, 0xa9, 0xd0, 0x2d, 0x78, 0x72, 0x56,
+	0x0b, 0x72, 0x52, 0x4d, 0xa0, 0x5d, 0x2a, 0x70, 0x07, 0x36, 0xe6, 0xfa, 0xac, 0x43, 0x71, 0x1e,
+	0xca, 0xbe, 0x84, 0xe8, 0x58, 0xd4, 0x13, 0x5a, 0x65, 0x48, 0xb4, 0x16, 0xff, 0x6e, 0xc2, 0x89,
+	0xa9, 0xb6, 0x8a, 0x9f, 0x70, 0x9f, 0x85, 0xbe, 0x9d, 0x2c, 0x8a, 0x49, 0xb6, 0xea, 0xb1, 0xbc,
+	0xa3, 0xc5, 0x9d, 0x41, 0x36, 0x9d, 0x0b, 0xb9, 0x74, 0x5e, 0x83, 0xb2, 0x2c, 0xed, 0x64, 0xb0,
+	0xac, 0xe4, 0x5e, 0x75, 0xe8, 0xb8, 0x6c, 0x67, 0x4d, 0x4f, 0xfe, 0x9a, 0x14, 0xb5, 0x07, 0xce,
+	0x48, 0x50, 0x46, 0xb4, 0x19, 0x7a, 0x17, 0xca, 0xfd, 0xd8, 0x19, 0xbe, 0x5e, 0x94, 0x04, 0xcb,
+	0x09, 0x41, 0xb6, 0xf3, 0x35, 0x04, 0x7f, 0x6f, 0x42, 0x49, 0xb9, 0xfe, 0xa6, 0x72, 0xd5, 0x84,
+	0x25, 0x1a, 0xf4, 0xc3, 0x81, 0x1b, 0x0c, 0x65, 0x8b, 0x94, 0x48, 0x7a, 0x46, 0x48, 0x97, 0x6e,
+	0xdc, 0x0b, 0x35, 0x5d, 0x9f, 0xeb, 0xf0, 0xbf, 0x1e, 0x73, 0x02, 0x7e, 0x9f, 0x32, 0xe9, 0x58,
+	0x9a, 0x18, 0xfc, 0x2d, 0xc0, 0x24, 0xde, 0x99, 0x38, 0x99, 0xff, 0x2e, 0x4e, 0x2d, 0x58, 0xe4,
+	0x8e, 0x3f, 0xf2, 0x64, 0x87, 0xe7, 0x12, 0xdd, 0x95, 0x62, 0x1d, 0xa9, 0x04, 0x84, 0x2f, 0x43,
+	0x25, 0xa5, 0x8e, 0x3d, 0x4f, 0x27, 0x62, 0x8d, 0xc8, 0xdf, 0x68, 0x0d, 0x4a, 0x72, 0xde, 0xc9,
+	0x40, 0xd4, 0x88, 0x3a, 0xe0, 0x36, 0x94, 0x15, 0xdf, 0x44, 0xaf, 0x66, 0x8e, 0x3a, 0xc4, 0xb3,
+	0x72, 0x46, 0x14, 0xab, 0x62, 0x12, 0x42, 0xdc, 0x86, 0xe5, 0x5c, 0xa9, 0xe6, 0xd6, 0x8f, 0xf9,
+	0x9a, 0xeb, 0xa7, 0xac, 0xca, 0xf7, 0x3f, 0xc7, 0x0d, 0xdb, 0x50, 0xcb, 0x5e, 0x82, 0xce, 0x41,
+	0x51, 0x3c, 0x1a, 0xa9, 0x57, 0xd5, 0x27, 0x74, 0x52, 0xdd, 0x7b, 0x34, 0xa2, 0x44, 0xaa, 0xd3,
+	0x88, 0xa9, 0x6a, 0x9f, 0x8a, 0x58, 0x41, 0x0a, 0xd5, 0xe1, 0x9d, 0x4f, 0xa0, 0x92, 0x1a, 0xa3,
+	0x0a, 0x94, 0x76, 0xef, 0xde, 0x6b, 0xef, 0x35, 0x0c, 0xb4, 0x0c, 0x95, 0x83, 0x3b, 0x3d, 0x5b,
+	0x1d, 0x4d, 0x74, 0x02, 0xaa, 0x64, 0xf7, 0xd6, 0xee, 0xe7, 0xf6, 0x7e, 0xbb, 0x77, 0xfd, 0x76,
+	0x63, 0x01, 0x21, 0xa8, 0x2b, 0xc1, 0xc1, 0x1d, 0x2d, 0x2b, 0x6c, 0xff, 0x54, 0x82, 0xa5, 0xa4,
+	0xeb, 0xd0, 0x65, 0x28, 0x1e, 0x46, 0xfc, 0x18, 0xad, 0xcd, 0xfa, 0x02, 0x6a, 0x9e, 0x9c, 0x92,
+	0xea, 0xaa, 0x33, 0xd0, 0x07, 0x50, 0x92, 0xfb, 0x16, 0xcd, 0xfc, 0x7c, 0x69, 0xce, 0xfe, 0x28,
+	0xc1, 0x06, 0xba, 0x01, 0xd5, 0xcc, 0x9e, 0x9e, 0x63, 0x7d, 0x26, 0x27, 0xcd, 0xaf, 0x74, 0x6c,
+	0x5c, 0x34, 0xd1, 0x6d, 0xa8, 0x66, 0x56, 0x2c, 0x6a, 0xe6, 0xd2, 0x95, 0x5b, 0xd6, 0x13, 0xae,
+	0x19, 0x3b, 0x19, 0x1b, 0x68, 0x17, 0x60, 0xb2, 0x5d, 0xd1, 0xe9, 0x1c, 0x38, 0xbb, 0x86, 0x9b,
+	0xcd, 0x59, 0xaa, 0x94, 0x66, 0x07, 0x2a, 0xe9, 0x6e, 0x41, 0xeb, 0x33, 0xd6, 0x8d, 0x22, 0x99,
+	0xbf, 0x88, 0xb0, 0x81, 0x6e, 0x42, 0xad, 0xed, 0x79, 0xaf, 0x43, 0xd3, 0xcc, 0x6a, 0xf8, 0x34,
+	0x8f, 0x07, 0xa7, 0xe6, 0x8c, 0x73, 0x74, 0x3e, 0x3f, 0xb6, 0xe7, 0xed, 0xa8, 0xe6, 0xdb, 0xff,
+	0x88, 0x4b, 0x6f, 0xdb, 0x87, 0x7a, 0x7e, 0x34, 0xa1, 0x79, 0xdf, 0x57, 0x4d, 0x2b, 0x55, 0xcc,
+	0x9e, 0x65, 0xc6, 0xa6, 0xb9, 0xf3, 0xd1, 0x93, 0xe7, 0x96, 0xf1, 0xf4, 0xb9, 0x65, 0xbc, 0x7c,
+	0x6e, 0x99, 0xdf, 0x8d, 0x2d, 0xf3, 0xe7, 0xb1, 0x65, 0x3e, 0x1e, 0x5b, 0xe6, 0x93, 0xb1, 0x65,
+	0xfe, 0x39, 0xb6, 0xcc, 0xbf, 0xc6, 0x96, 0xf1, 0x72, 0x6c, 0x99, 0x3f, 0xbc, 0xb0, 0x8c, 0x27,
+	0x2f, 0x2c, 0xe3, 0xe9, 0x0b, 0xcb, 0xf8, 0xa2, 0xac, 0xfe, 0x7b, 0x1c, 0x95, 0xe5, 0xdf, 0x87,
+	0x4b, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x95, 0x27, 0x3b, 0x4e, 0xb9, 0x0c, 0x00, 0x00,
+}
+
 func (x MatchType) String() string {
 	s, ok := MatchType_name[int32(x)]
 	if ok {
@@ -1864,7 +1937,7 @@ func (this *TimeSeriesChunk) Equal(that interface{}) bool {
 		return false
 	}
 	for i := range this.Labels {
-		if !this.Labels[i].Equal(&that1.Labels[i]) {
+		if !this.Labels[i].Equal(that1.Labels[i]) {
 			return false
 		}
 	}
@@ -1955,7 +2028,7 @@ func (this *TimeSeries) Equal(that interface{}) bool {
 		return false
 	}
 	for i := range this.Labels {
-		if !this.Labels[i].Equal(&that1.Labels[i]) {
+		if !this.Labels[i].Equal(that1.Labels[i]) {
 			return false
 		}
 	}
@@ -1988,10 +2061,10 @@ func (this *LabelPair) Equal(that interface{}) bool {
 	} else if this == nil {
 		return false
 	}
-	if !this.Name.Equal(that1.Name) {
+	if !bytes.Equal(this.Name, that1.Name) {
 		return false
 	}
-	if !this.Value.Equal(that1.Value) {
+	if !bytes.Equal(this.Value, that1.Value) {
 		return false
 	}
 	return true
@@ -2075,7 +2148,7 @@ func (this *Metric) Equal(that interface{}) bool {
 		return false
 	}
 	for i := range this.Labels {
-		if !this.Labels[i].Equal(&that1.Labels[i]) {
+		if !this.Labels[i].Equal(that1.Labels[i]) {
 			return false
 		}
 	}
@@ -2321,13 +2394,7 @@ func (this *TimeSeriesChunk) GoString() string {
 	s = append(s, "&client.TimeSeriesChunk{")
 	s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n")
 	s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n")
-	if this.Labels != nil {
-		vs := make([]*LabelPair, len(this.Labels))
-		for i := range vs {
-			vs[i] = &this.Labels[i]
-		}
-		s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
+	s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
 	if this.Chunks != nil {
 		vs := make([]*Chunk, len(this.Chunks))
 		for i := range vs {
@@ -2366,13 +2433,7 @@ func (this *TimeSeries) GoString() string {
 	}
 	s := make([]string, 0, 6)
 	s = append(s, "&client.TimeSeries{")
-	if this.Labels != nil {
-		vs := make([]*LabelPair, len(this.Labels))
-		for i := range vs {
-			vs[i] = &this.Labels[i]
-		}
-		s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
+	s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
 	if this.Samples != nil {
 		vs := make([]*Sample, len(this.Samples))
 		for i := range vs {
@@ -2423,13 +2484,7 @@ func (this *Metric) GoString() string {
 	}
 	s := make([]string, 0, 5)
 	s = append(s, "&client.Metric{")
-	if this.Labels != nil {
-		vs := make([]*LabelPair, len(this.Labels))
-		for i := range vs {
-			vs[i] = &this.Labels[i]
-		}
-		s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
-	}
+	s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -3259,9 +3314,9 @@ func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) {
 		dAtA[i] = 0x12
 		i++
 		i = encodeVarintCortex(dAtA, i, uint64(m.Data.Size()))
-		n1, err := m.Data.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
+		n1, err1 := m.Data.MarshalTo(dAtA[i:])
+		if err1 != nil {
+			return 0, err1
 		}
 		i += n1
 	}
@@ -3536,22 +3591,18 @@ func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
-	dAtA[i] = 0xa
-	i++
-	i = encodeVarintCortex(dAtA, i, uint64(m.Name.Size()))
-	n2, err := m.Name.MarshalTo(dAtA[i:])
-	if err != nil {
-		return 0, err
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintCortex(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
 	}
-	i += n2
-	dAtA[i] = 0x12
-	i++
-	i = encodeVarintCortex(dAtA, i, uint64(m.Value.Size()))
-	n3, err := m.Value.MarshalTo(dAtA[i:])
-	if err != nil {
-		return 0, err
+	if len(m.Value) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintCortex(dAtA, i, uint64(len(m.Value)))
+		i += copy(dAtA[i:], m.Value)
 	}
-	i += n3
 	return i, nil
 }
 
@@ -4033,10 +4084,14 @@ func (m *LabelPair) Size() (n int) {
 	}
 	var l int
 	_ = l
-	l = m.Name.Size()
-	n += 1 + l + sovCortex(uint64(l))
-	l = m.Value.Size()
-	n += 1 + l + sovCortex(uint64(l))
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovCortex(uint64(l))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovCortex(uint64(l))
+	}
 	return n
 }
 
@@ -4142,8 +4197,13 @@ func (this *ReadRequest) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForQueries := "[]*QueryRequest{"
+	for _, f := range this.Queries {
+		repeatedStringForQueries += strings.Replace(f.String(), "QueryRequest", "QueryRequest", 1) + ","
+	}
+	repeatedStringForQueries += "}"
 	s := strings.Join([]string{`&ReadRequest{`,
-		`Queries:` + strings.Replace(fmt.Sprintf("%v", this.Queries), "QueryRequest", "QueryRequest", 1) + `,`,
+		`Queries:` + repeatedStringForQueries + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4152,8 +4212,13 @@ func (this *ReadResponse) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForResults := "[]*QueryResponse{"
+	for _, f := range this.Results {
+		repeatedStringForResults += strings.Replace(f.String(), "QueryResponse", "QueryResponse", 1) + ","
+	}
+	repeatedStringForResults += "}"
 	s := strings.Join([]string{`&ReadResponse{`,
-		`Results:` + strings.Replace(fmt.Sprintf("%v", this.Results), "QueryResponse", "QueryResponse", 1) + `,`,
+		`Results:` + repeatedStringForResults + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4162,10 +4227,15 @@ func (this *QueryRequest) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForMatchers := "[]*LabelMatcher{"
+	for _, f := range this.Matchers {
+		repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + ","
+	}
+	repeatedStringForMatchers += "}"
 	s := strings.Join([]string{`&QueryRequest{`,
 		`StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`,
 		`EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`,
-		`Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`,
+		`Matchers:` + repeatedStringForMatchers + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4174,8 +4244,13 @@ func (this *QueryResponse) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForTimeseries := "[]TimeSeries{"
+	for _, f := range this.Timeseries {
+		repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTimeseries += "}"
 	s := strings.Join([]string{`&QueryResponse{`,
-		`Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + `,`,
+		`Timeseries:` + repeatedStringForTimeseries + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4184,8 +4259,13 @@ func (this *QueryStreamResponse) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForTimeseries := "[]TimeSeriesChunk{"
+	for _, f := range this.Timeseries {
+		repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTimeseries += "}"
 	s := strings.Join([]string{`&QueryStreamResponse{`,
-		`Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + `,`,
+		`Timeseries:` + repeatedStringForTimeseries + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4257,7 +4337,7 @@ func (this *UserIDStatsResponse) String() string {
 	}
 	s := strings.Join([]string{`&UserIDStatsResponse{`,
 		`UserId:` + fmt.Sprintf("%v", this.UserId) + `,`,
-		`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "UserStatsResponse", "UserStatsResponse", 1) + `,`,
+		`Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4266,8 +4346,13 @@ func (this *UsersStatsResponse) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForStats := "[]*UserIDStatsResponse{"
+	for _, f := range this.Stats {
+		repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + ","
+	}
+	repeatedStringForStats += "}"
 	s := strings.Join([]string{`&UsersStatsResponse{`,
-		`Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "UserIDStatsResponse", "UserIDStatsResponse", 1) + `,`,
+		`Stats:` + repeatedStringForStats + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4276,10 +4361,15 @@ func (this *MetricsForLabelMatchersRequest) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForMatchersSet := "[]*LabelMatchers{"
+	for _, f := range this.MatchersSet {
+		repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + ","
+	}
+	repeatedStringForMatchersSet += "}"
 	s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`,
 		`StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`,
 		`EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`,
-		`MatchersSet:` + strings.Replace(fmt.Sprintf("%v", this.MatchersSet), "LabelMatchers", "LabelMatchers", 1) + `,`,
+		`MatchersSet:` + repeatedStringForMatchersSet + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4288,8 +4378,13 @@ func (this *MetricsForLabelMatchersResponse) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForMetric := "[]*Metric{"
+	for _, f := range this.Metric {
+		repeatedStringForMetric += strings.Replace(f.String(), "Metric", "Metric", 1) + ","
+	}
+	repeatedStringForMetric += "}"
 	s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`,
-		`Metric:` + strings.Replace(fmt.Sprintf("%v", this.Metric), "Metric", "Metric", 1) + `,`,
+		`Metric:` + repeatedStringForMetric + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4298,11 +4393,16 @@ func (this *TimeSeriesChunk) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForChunks := "[]Chunk{"
+	for _, f := range this.Chunks {
+		repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForChunks += "}"
 	s := strings.Join([]string{`&TimeSeriesChunk{`,
 		`FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`,
 		`UserId:` + fmt.Sprintf("%v", this.UserId) + `,`,
-		`Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`,
-		`Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1), `&`, ``, 1) + `,`,
+		`Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+		`Chunks:` + repeatedStringForChunks + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4333,9 +4433,14 @@ func (this *TimeSeries) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForSamples := "[]Sample{"
+	for _, f := range this.Samples {
+		repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSamples += "}"
 	s := strings.Join([]string{`&TimeSeries{`,
-		`Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`,
-		`Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`,
+		`Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+		`Samples:` + repeatedStringForSamples + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4366,8 +4471,13 @@ func (this *LabelMatchers) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForMatchers := "[]*LabelMatcher{"
+	for _, f := range this.Matchers {
+		repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + ","
+	}
+	repeatedStringForMatchers += "}"
 	s := strings.Join([]string{`&LabelMatchers{`,
-		`Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`,
+		`Matchers:` + repeatedStringForMatchers + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4377,7 +4487,7 @@ func (this *Metric) String() string {
 		return "nil"
 	}
 	s := strings.Join([]string{`&Metric{`,
-		`Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`,
+		`Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4417,7 +4527,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4445,7 +4555,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4454,6 +4564,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4476,7 +4589,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Source |= (WriteRequest_SourceEnum(b) & 0x7F) << shift
+				m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4490,6 +4603,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4517,7 +4633,7 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4540,6 +4656,9 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4567,7 +4686,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4595,7 +4714,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4604,6 +4723,9 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4621,6 +4743,9 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4648,7 +4773,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4676,7 +4801,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4685,6 +4810,9 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4702,6 +4830,9 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4729,7 +4860,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4757,7 +4888,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.StartTimestampMs |= (int64(b) & 0x7F) << shift
+				m.StartTimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4776,7 +4907,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.EndTimestampMs |= (int64(b) & 0x7F) << shift
+				m.EndTimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4795,7 +4926,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4804,6 +4935,9 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4821,6 +4955,9 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4848,7 +4985,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4876,7 +5013,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4885,6 +5022,9 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4902,6 +5042,9 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4929,7 +5072,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -4957,7 +5100,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -4966,6 +5109,9 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -4983,6 +5129,9 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5010,7 +5159,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5038,7 +5187,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5048,6 +5197,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5062,6 +5214,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5089,7 +5244,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5117,7 +5272,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5127,6 +5282,9 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5141,6 +5299,9 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5168,7 +5329,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5191,6 +5352,9 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5218,7 +5382,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5246,7 +5410,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5256,6 +5420,9 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5270,6 +5437,9 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5297,7 +5467,7 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5320,6 +5490,9 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5347,7 +5520,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5386,7 +5559,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.NumSeries |= (uint64(b) & 0x7F) << shift
+				m.NumSeries |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5422,6 +5595,9 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5449,7 +5625,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5477,7 +5653,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5487,6 +5663,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5506,7 +5685,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5515,6 +5694,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5534,6 +5716,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5561,7 +5746,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5589,7 +5774,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5598,6 +5783,9 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5615,6 +5803,9 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5642,7 +5833,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5670,7 +5861,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.StartTimestampMs |= (int64(b) & 0x7F) << shift
+				m.StartTimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5689,7 +5880,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.EndTimestampMs |= (int64(b) & 0x7F) << shift
+				m.EndTimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5708,7 +5899,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5717,6 +5908,9 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5734,6 +5928,9 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5761,7 +5958,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5789,7 +5986,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5798,6 +5995,9 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5815,6 +6015,9 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5842,7 +6045,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -5870,7 +6073,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5880,6 +6083,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5899,7 +6105,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5909,6 +6115,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5928,7 +6137,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5937,10 +6146,13 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Labels = append(m.Labels, LabelPair{})
+			m.Labels = append(m.Labels, LabelAdapter{})
 			if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -5959,7 +6171,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5968,6 +6180,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -5985,6 +6200,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6012,7 +6230,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6040,7 +6258,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.StartTimestampMs |= (int64(b) & 0x7F) << shift
+				m.StartTimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6059,7 +6277,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.EndTimestampMs |= (int64(b) & 0x7F) << shift
+				m.EndTimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6078,7 +6296,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Encoding |= (int32(b) & 0x7F) << shift
+				m.Encoding |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6097,7 +6315,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6106,6 +6324,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6123,6 +6344,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6150,7 +6374,7 @@ func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6173,6 +6397,9 @@ func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6200,7 +6427,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6228,7 +6455,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6237,10 +6464,13 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Labels = append(m.Labels, LabelPair{})
+			m.Labels = append(m.Labels, LabelAdapter{})
 			if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -6259,7 +6489,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6268,6 +6498,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6285,6 +6518,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6312,7 +6548,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6340,7 +6576,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6349,11 +6585,15 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := m.Name.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
 			}
 			iNdEx = postIndex
 		case 2:
@@ -6370,7 +6610,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				byteLen |= (int(b) & 0x7F) << shift
+				byteLen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6379,11 +6619,15 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
 			}
 			iNdEx = postIndex
 		default:
@@ -6395,6 +6639,9 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6422,7 +6669,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6461,7 +6708,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.TimestampMs |= (int64(b) & 0x7F) << shift
+				m.TimestampMs |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6475,6 +6722,9 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6502,7 +6752,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6530,7 +6780,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6539,6 +6789,9 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6556,6 +6809,9 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6583,7 +6839,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6611,7 +6867,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6620,10 +6876,13 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Labels = append(m.Labels, LabelPair{})
+			m.Labels = append(m.Labels, LabelAdapter{})
 			if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
@@ -6637,6 +6896,9 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6664,7 +6926,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -6692,7 +6954,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Type |= (MatchType(b) & 0x7F) << shift
+				m.Type |= MatchType(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6711,7 +6973,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6721,6 +6983,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6740,7 +7005,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -6750,6 +7015,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthCortex
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6764,6 +7032,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthCortex
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -6830,10 +7101,13 @@ func skipCortex(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthCortex
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthCortex
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -6862,6 +7136,9 @@ func skipCortex(dAtA []byte) (n int, err error) {
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthCortex
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -6880,89 +7157,3 @@ var (
 	ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowCortex   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_cortex_dc30309a17c87a98)
-}
-
-var fileDescriptor_cortex_dc30309a17c87a98 = []byte{
-	// 1247 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45,
-	0x14, 0xdf, 0x8d, 0xff, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0xd2, 0xd2, 0xd4, 0x15, 0xeb, 0x32, 0x52,
-	0x4b, 0x04, 0xd4, 0x2e, 0xa9, 0x0a, 0x45, 0x50, 0x81, 0xd3, 0xba, 0xad, 0x51, 0x92, 0xa6, 0x6b,
-	0x17, 0x10, 0x12, 0x5a, 0x6d, 0xec, 0xa9, 0xb3, 0x74, 0xff, 0xb8, 0x33, 0xb3, 0x40, 0x6e, 0x7c,
-	0x03, 0x38, 0xc2, 0x37, 0xe0, 0x86, 0xc4, 0x05, 0x3e, 0x42, 0x8f, 0x3d, 0x56, 0x1c, 0x2a, 0xea,
-	0x5e, 0x38, 0xf6, 0x23, 0xa0, 0x9d, 0x99, 0x5d, 0xef, 0xba, 0xb6, 0x08, 0x42, 0xbd, 0x79, 0xde,
-	0xfb, 0xbd, 0xdf, 0xbe, 0xbf, 0xf3, 0xc6, 0xf0, 0xc9, 0xc8, 0xe1, 0x87, 0xe1, 0x41, 0x73, 0x10,
-	0x78, 0xad, 0x41, 0x40, 0x39, 0xf9, 0x6e, 0x4c, 0x83, 0xaf, 0xc9, 0x80, 0xab, 0x53, 0x6b, 0xfc,
-	0x60, 0xd4, 0x72, 0xfc, 0x11, 0x61, 0x9c, 0xd0, 0xd6, 0xc0, 0x75, 0x88, 0x1f, 0xab, 0x9a, 0x63,
-	0x1a, 0xf0, 0x00, 0x15, 0xe5, 0xa9, 0x7e, 0x31, 0xc5, 0x34, 0x0a, 0x46, 0x41, 0x4b, 0xa8, 0x0f,
-	0xc2, 0xfb, 0xe2, 0x24, 0x0e, 0xe2, 0x97, 0x34, 0xc3, 0x7f, 0xe8, 0x50, 0xf9, 0x9c, 0x3a, 0x9c,
-	0x98, 0xe4, 0x61, 0x48, 0x18, 0x47, 0x7b, 0x00, 0xdc, 0xf1, 0x08, 0x23, 0xd4, 0x21, 0x6c, 0x43,
-	0x3f, 0x97, 0xdb, 0x2c, 0x6f, 0xa1, 0xa6, 0xfa, 0x54, 0xdf, 0xf1, 0x48, 0x4f, 0x68, 0xb6, 0xeb,
-	0x8f, 0x9e, 0x36, 0xb4, 0x3f, 0x9f, 0x36, 0xd0, 0x3e, 0x25, 0xb6, 0xeb, 0x06, 0x83, 0x7e, 0x62,
-	0x65, 0xa6, 0x18, 0xd0, 0xfb, 0x50, 0xec, 0x05, 0x21, 0x1d, 0x90, 0x8d, 0xa5, 0x73, 0xfa, 0x66,
-	0x75, 0xab, 0x11, 0x73, 0xa5, 0xbf, 0xda, 0x94, 0x90, 0x8e, 0x1f, 0x7a, 0x66, 0x91, 0x89, 0xdf,
-	0xb8, 0x01, 0x30, 0x95, 0xa2, 0x65, 0xc8, 0xb5, 0xf7, 0xbb, 0x35, 0x0d, 0xad, 0x40, 0xde, 0xbc,
-	0xb7, 0xd3, 0xa9, 0xe9, 0xf8, 0x04, 0xac, 0x2a, 0x0e, 0x36, 0x0e, 0x7c, 0x46, 0xf0, 0x35, 0x28,
-	0x9b, 0xc4, 0x1e, 0xc6, 0x91, 0x34, 0x61, 0xf9, 0x61, 0x98, 0x0e, 0xe3, 0x64, 0xfc, 0xe9, 0xbb,
-	0x21, 0xa1, 0x47, 0x0a, 0x66, 0xc6, 0x20, 0xfc, 0x31, 0x54, 0xa4, 0xb9, 0xa4, 0x43, 0x2d, 0x58,
-	0xa6, 0x84, 0x85, 0x2e, 0x8f, 0xed, 0x4f, 0xcd, 0xd8, 0x4b, 0x9c, 0x19, 0xa3, 0xf0, 0x4f, 0x3a,
-	0x54, 0xd2, 0xd4, 0xe8, 0x1d, 0x40, 0x8c, 0xdb, 0x94, 0x5b, 0x22, 0x1f, 0xdc, 0xf6, 0xc6, 0x96,
-	0x17, 0x91, 0xe9, 0x9b, 0x39, 0xb3, 0x26, 0x34, 0xfd, 0x58, 0xb1, 0xcb, 0xd0, 0x26, 0xd4, 0x88,
-	0x3f, 0xcc, 0x62, 0x97, 0x04, 0xb6, 0x4a, 0xfc, 0x61, 0x1a, 0x79, 0x09, 0x56, 0x3c, 0x9b, 0x0f,
-	0x0e, 0x09, 0x65, 0x1b, 0xb9, 0x6c, 0x68, 0x3b, 0xf6, 0x01, 0x71, 0x77, 0xa5, 0xd2, 0x4c, 0x50,
-	0xb8, 0x0b, 0xab, 0x19, 0xa7, 0xd1, 0xd5, 0x63, 0x96, 0x39, 0x1f, 0x95, 0x39, 0x5d, 0x50, 0xdc,
-	0x87, 0x75, 0x41, 0xd5, 0xe3, 0x94, 0xd8, 0x5e, 0x42, 0x78, 0x6d, 0x0e, 0xe1, 0xe9, 0x97, 0x09,
-	0xaf, 0x1f, 0x86, 0xfe, 0x83, 0x39, 0xac, 0x97, 0x01, 0x09, 0xd7, 0x3f, 0xb3, 0xdd, 0x90, 0xb0,
-	0x38, 0x81, 0xaf, 0x03, 0xb8, 0x91, 0xd4, 0xf2, 0x6d, 0x8f, 0x88, 0xc4, 0x95, 0xcc, 0x92, 0x90,
-	0xec, 0xd9, 0x1e, 0xc1, 0x57, 0x61, 0x3d, 0x63, 0xa4, 0x5c, 0x79, 0x03, 0x2a, 0xd2, 0xea, 0x1b,
-	0x21, 0x17, 0xce, 0x94, 0xcc, 0xb2, 0x3b, 0x85, 0xe2, 0x75, 0x58, 0xdb, 0x89, 0x69, 0xe2, 0xaf,
-	0xe1, 0x2b, 0xca, 0x07, 0x25, 0x54, 0x6c, 0x0d, 0x28, 0x4f, 0x7d, 0x88, 0xc9, 0x20, 0x71, 0x82,
-	0x61, 0x04, 0xb5, 0x7b, 0x8c, 0xd0, 0x1e, 0xb7, 0x79, 0x42, 0xf5, 0xbb, 0x0e, 0x6b, 0x29, 0xa1,
-	0xa2, 0x3a, 0x0f, 0x55, 0x39, 0xc3, 0x4e, 0xe0, 0x5b, 0xd4, 0xe6, 0x32, 0x24, 0xdd, 0x5c, 0x4d,
-	0xa4, 0xa6, 0xcd, 0x49, 0x14, 0xb5, 0x1f, 0x7a, 0x96, 0x4a, 0x65, 0xd4, 0x02, 0x79, 0xb3, 0xe4,
-	0x87, 0x9e, 0xcc, 0x60, 0xd4, 0x55, 0xf6, 0xd8, 0xb1, 0x66, 0x98, 0x72, 0x82, 0xa9, 0x66, 0x8f,
-	0x9d, 0x6e, 0x86, 0xac, 0x09, 0xeb, 0x34, 0x74, 0xc9, 0x2c, 0x3c, 0x2f, 0xe0, 0x6b, 0x91, 0x2a,
-	0x83, 0xc7, 0x5f, 0xc1, 0x7a, 0xe4, 0x78, 0xf7, 0x46, 0xd6, 0xf5, 0xd3, 0xb0, 0x1c, 0x32, 0x42,
-	0x2d, 0x67, 0xa8, 0xca, 0x50, 0x8c, 0x8e, 0xdd, 0x21, 0xba, 0x08, 0xf9, 0xa1, 0xcd, 0x6d, 0xe1,
-	0x66, 0x79, 0xeb, 0x4c, 0x5c, 0xf1, 0x97, 0x82, 0x37, 0x05, 0x0c, 0xdf, 0x02, 0x14, 0xa9, 0x58,
-	0x96, 0xfd, 0x5d, 0x28, 0xb0, 0x48, 0xa0, 0xfa, 0xe6, 0x6c, 0x9a, 0x65, 0xc6, 0x13, 0x53, 0x22,
-	0xf1, 0x6f, 0x3a, 0x18, 0xbb, 0x84, 0x53, 0x67, 0xc0, 0x6e, 0x06, 0x34, 0xdd, 0xf6, 0xec, 0x55,
-	0x8f, 0xdf, 0x55, 0xa8, 0xc4, 0x83, 0x65, 0x31, 0xc2, 0xd5, 0x08, 0x9e, 0x9a, 0x37, 0x82, 0xcc,
-	0x2c, 0xc7, 0xd0, 0x1e, 0xe1, 0xb8, 0x0b, 0x8d, 0x85, 0x3e, 0xab, 0x54, 0x5c, 0x80, 0xa2, 0x27,
-	0x20, 0x2a, 0x17, 0xd5, 0x98, 0x56, 0x1a, 0x9a, 0x4a, 0x1b, 0xc5, 0x7f, 0x62, 0x66, 0xac, 0xa2,
-	0x10, 0xee, 0xd3, 0xc0, 0xb3, 0xe2, 0x45, 0x31, 0xad, 0x56, 0x35, 0x92, 0x77, 0x95, 0xb8, 0x3b,
-	0x4c, 0x97, 0x73, 0x29, 0x53, 0xce, 0x16, 0x14, 0x45, 0x6b, 0xc7, 0x17, 0xcb, 0x5a, 0x26, 0xaa,
-	0x7d, 0xdb, 0xa1, 0x6a, 0x78, 0x15, 0x0c, 0xbd, 0x0d, 0xc5, 0x41, 0xf4, 0x71, 0xb6, 0x91, 0x17,
-	0x06, 0xab, 0xb1, 0x41, 0x7a, 0xd2, 0x15, 0x04, 0xff, 0xa0, 0x43, 0x41, 0xba, 0xfa, 0xaa, 0x6a,
-	0x53, 0x87, 0x15, 0xe2, 0x0f, 0x82, 0xa1, 0xe3, 0x8f, 0xc4, 0x48, 0x14, 0xcc, 0xe4, 0x8c, 0x90,
-	0x6a, 0xd5, 0xa8, 0xf7, 0x2b, 0xaa, 0x1f, 0x37, 0xe0, 0xb5, 0x3e, 0xb5, 0x7d, 0x76, 0x9f, 0x50,
-	0xe1, 0x58, 0x52, 0x08, 0xec, 0x01, 0x4c, 0xf3, 0x9b, 0xca, 0x8b, 0x7e, 0xbc, 0xbc, 0x34, 0x61,
-	0x99, 0xd9, 0xde, 0xd8, 0x15, 0x13, 0x9c, 0x29, 0x64, 0x4f, 0x88, 0x15, 0x3c, 0x06, 0xe1, 0x5f,
-	0x75, 0x28, 0x25, 0x5c, 0xe8, 0x0e, 0xe4, 0x93, 0x2b, 0xaf, 0xb2, 0xfd, 0xa1, 0xda, 0xb5, 0x97,
-	0x8f, 0xf3, 0x4a, 0x08, 0xb9, 0xe3, 0xb6, 0xbe, 0x75, 0x28, 0x69, 0x6e, 0x1f, 0x71, 0xc2, 0x4c,
-	0x41, 0x84, 0xee, 0x42, 0x41, 0xdc, 0x86, 0x22, 0x6d, 0xff, 0x93, 0x51, 0x32, 0xe1, 0x36, 0x14,
-	0x65, 0x28, 0xe8, 0x64, 0x4c, 0x2e, 0xaf, 0x33, 0x79, 0x88, 0xae, 0xe1, 0x39, 0x05, 0x2b, 0xf3,
-	0x69, 0xb5, 0x70, 0x1b, 0x56, 0x33, 0x53, 0x90, 0xd9, 0x6c, 0xfa, 0xb1, 0x36, 0xdb, 0x07, 0x50,
-	0x94, 0x93, 0xf1, 0x9f, 0x4b, 0x84, 0x2d, 0xa8, 0xa4, 0x49, 0xd1, 0x79, 0xc8, 0xf3, 0xa3, 0xb1,
-	0x8c, 0xa2, 0x3a, 0x35, 0x17, 0xea, 0xfe, 0xd1, 0x98, 0x98, 0x42, 0x1d, 0xb5, 0x91, 0xa8, 0x8d,
-	0x1c, 0x1c, 0x99, 0xde, 0x24, 0x03, 0x39, 0x21, 0x94, 0x87, 0xb7, 0x3e, 0x85, 0x52, 0x62, 0x8c,
-	0x4a, 0x50, 0xe8, 0xdc, 0xbd, 0xd7, 0xde, 0xa9, 0x69, 0x68, 0x15, 0x4a, 0x7b, 0x77, 0xfa, 0x96,
-	0x3c, 0xea, 0xe8, 0x04, 0x94, 0xcd, 0xce, 0xad, 0xce, 0x17, 0xd6, 0x6e, 0xbb, 0x7f, 0xfd, 0x76,
-	0x6d, 0x09, 0x21, 0xa8, 0x4a, 0xc1, 0xde, 0x1d, 0x25, 0xcb, 0x6d, 0xfd, 0x5c, 0x80, 0x95, 0x78,
-	0x80, 0xd1, 0x15, 0xc8, 0xef, 0x87, 0xec, 0x10, 0x9d, 0x9c, 0xf7, 0x98, 0xaa, 0x9f, 0x9a, 0x91,
-	0xaa, 0x86, 0xd6, 0xd0, 0x7b, 0x50, 0x10, 0xab, 0x1b, 0xcd, 0x7d, 0x09, 0xd5, 0xe7, 0xbf, 0x6f,
-	0xb0, 0x86, 0x6e, 0x40, 0x39, 0xb5, 0xf2, 0x17, 0x58, 0x9f, 0xcd, 0x48, 0xb3, 0xaf, 0x03, 0xac,
-	0x5d, 0xd2, 0xd1, 0x6d, 0x28, 0xa7, 0xb6, 0x35, 0xaa, 0x67, 0xca, 0x93, 0xd9, 0xfb, 0x53, 0xae,
-	0x39, 0xeb, 0x1d, 0x6b, 0xa8, 0x03, 0x30, 0x5d, 0xd4, 0xe8, 0x4c, 0x06, 0x9c, 0xde, 0xe8, 0xf5,
-	0xfa, 0x3c, 0x55, 0x42, 0xb3, 0x0d, 0xa5, 0x64, 0x4d, 0xa1, 0x8d, 0x39, 0x9b, 0x4b, 0x92, 0x2c,
-	0xde, 0x69, 0x58, 0x43, 0x37, 0xa1, 0xd2, 0x76, 0xdd, 0xe3, 0xd0, 0xd4, 0xd3, 0x1a, 0x36, 0xcb,
-	0xe3, 0xc2, 0xe9, 0x05, 0x9b, 0x01, 0x5d, 0xc8, 0x6e, 0x80, 0x45, 0xeb, 0xae, 0xfe, 0xe6, 0xbf,
-	0xe2, 0x92, 0xaf, 0xed, 0x42, 0x35, 0x7b, 0xeb, 0xa1, 0x45, 0x4f, 0xb5, 0xba, 0x91, 0x28, 0xe6,
-	0x5f, 0x93, 0xda, 0xa6, 0xbe, 0xfd, 0xd1, 0xe3, 0x67, 0x86, 0xf6, 0xe4, 0x99, 0xa1, 0xbd, 0x78,
-	0x66, 0xe8, 0xdf, 0x4f, 0x0c, 0xfd, 0x97, 0x89, 0xa1, 0x3f, 0x9a, 0x18, 0xfa, 0xe3, 0x89, 0xa1,
-	0xff, 0x35, 0x31, 0xf4, 0xbf, 0x27, 0x86, 0xf6, 0x62, 0x62, 0xe8, 0x3f, 0x3e, 0x37, 0xb4, 0xc7,
-	0xcf, 0x0d, 0xed, 0xc9, 0x73, 0x43, 0xfb, 0xb2, 0x28, 0xff, 0xc6, 0x1c, 0x14, 0xc5, 0x3f, 0x91,
-	0xcb, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x33, 0xbf, 0x53, 0xf9, 0x04, 0x0d, 0x00, 0x00,
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto
index 02e17e35b8a1f6472fe709fa2cf9170ce4cff9a2..e1659919920f75fef7ff4e5c1571a8ee11e6df0a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto
@@ -104,7 +104,7 @@ message MetricsForLabelMatchersResponse {
 message TimeSeriesChunk {
   string from_ingester_id = 1;
   string user_id = 2;
-  repeated LabelPair labels = 3 [(gogoproto.nullable) = false];
+  repeated LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
   repeated Chunk chunks = 4 [(gogoproto.nullable) = false];
 }
 
@@ -119,14 +119,14 @@ message TransferChunksResponse {
 }
 
 message TimeSeries {
-  repeated LabelPair labels = 1 [(gogoproto.nullable) = false];
+  repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
   // Sorted by time, oldest sample first.
   repeated Sample samples   = 2 [(gogoproto.nullable) = false];
 }
 
 message LabelPair {
-  bytes name  = 1 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
-  bytes value = 2 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
+  bytes name  = 1;
+  bytes value = 2;
 }
 
 message Sample {
@@ -139,7 +139,7 @@ message LabelMatchers {
 }
 
 message Metric {
-  repeated LabelPair labels = 1 [(gogoproto.nullable) = false];
+  repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
 }
 
 enum MatchType {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go
index 41453b10aaa9a4462424270daa2f4c7999486322..c415085950606edaeb08cf0321b70a0317c483e4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go
@@ -19,6 +19,8 @@ package client
 const (
 	offset64 = 14695981039346656037
 	prime64  = 1099511628211
+	offset32 = 2166136261
+	prime32  = 16777619
 )
 
 // hashNew initializies a new fnv64a hash value.
@@ -27,7 +29,8 @@ func hashNew() uint64 {
 }
 
 // hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s []byte) uint64 {
+// Note this is the same algorithm as Go stdlib `sum64a.Write()`
+func hashAdd(h uint64, s string) uint64 {
 	for i := 0; i < len(s); i++ {
 		h ^= uint64(s[i])
 		h *= prime64
@@ -41,3 +44,18 @@ func hashAddByte(h uint64, b byte) uint64 {
 	h *= prime64
 	return h
 }
+
+// HashNew32 initializes a new fnv32 hash value.
+func HashNew32() uint32 {
+	return offset32
+}
+
+// HashAdd32 adds a string to a fnv32 hash value, returning the updated hash.
+// Note this is the same algorithm as Go stdlib `sum32.Write()`
+func HashAdd32(h uint32, s string) uint32 {
+	for i := 0; i < len(s); i++ {
+		h *= prime32
+		h ^= uint32(s[i])
+	}
+	return h
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go
index b5b46b9ab878ddae8cf207c0bbddb4d070544dd9..8c7cc40588c8a9fe821c72879004365e28cbaf8b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go
@@ -1,6 +1,14 @@
 package client
 
-import "flag"
+import (
+	"flag"
+	"fmt"
+	"io"
+	"strings"
+	"unsafe"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+)
 
 var (
 	expectedTimeseries       = 100
@@ -37,7 +45,182 @@ type PreallocTimeseries struct {
 
 // Unmarshal implements proto.Message.
 func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
-	p.Labels = make([]LabelPair, 0, expectedLabels)
+	p.Labels = make([]LabelAdapter, 0, expectedLabels)
 	p.Samples = make([]Sample, 0, expectedSamplesPerSeries)
 	return p.TimeSeries.Unmarshal(dAtA)
 }
+
+// LabelAdapter is a labels.Label that can be marshalled to/from protos.
+type LabelAdapter labels.Label
+
+// Marshal implements proto.Marshaller.
+func (bs *LabelAdapter) Marshal() ([]byte, error) {
+	buf := make([]byte, bs.Size())
+	_, err := bs.MarshalTo(buf)
+	return buf, err
+}
+
+// MarshalTo implements proto.Marshaller.
+func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) {
+	var i int
+	ls := (*labels.Label)(bs)
+
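+	// Protobuf tags (assumed layout of the generated LabelPair encoding): 0xa is field 1 (Name),
+	// 0x12 is field 2 (Value); both are wire type 2 (length-delimited).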
+	buf[i] = 0xa
+	i++
+	i = encodeVarintCortex(buf, i, uint64(len(ls.Name)))
+	i += copy(buf[i:], ls.Name)
+
+	buf[i] = 0x12
+	i++
+	i = encodeVarintCortex(buf, i, uint64(len(ls.Value)))
+	i += copy(buf[i:], ls.Value)
+
+	return i, nil
+}
+
+// Unmarshal a LabelAdapter, implements proto.Unmarshaller.
+// NB this is a copy of the autogenerated code to unmarshal a LabelPair,
+// with the byte copying replaced with a yoloString.
+func (bs *LabelAdapter) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowCortex
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LabelPair: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			bs.Name = yoloString(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCortex
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthCortex
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			bs.Value = yoloString(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCortex(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthCortex
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
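+// yoloString reinterprets buf as a string without copying. The result aliases
+// buf, so it is only safe to use while the underlying buffer is retained and unmodified.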
+func yoloString(buf []byte) string {
+	return *((*string)(unsafe.Pointer(&buf)))
+}
+
+// Size implements proto.Sizer.
+func (bs *LabelAdapter) Size() int {
+	ls := (*labels.Label)(bs)
+	var n int
+	l := len(ls.Name)
+	n += 1 + l + sovCortex(uint64(l))
+	l = len(ls.Value)
+	n += 1 + l + sovCortex(uint64(l))
+	return n
+}
+
+// Equal implements proto.Equaler.
+func (bs *LabelAdapter) Equal(other LabelAdapter) bool {
+	return bs.Name == other.Name && bs.Value == other.Value
+}
+
+// Compare implements proto.Comparer.
+func (bs *LabelAdapter) Compare(other LabelAdapter) int {
+	if c := strings.Compare(bs.Name, other.Name); c != 0 {
+		return c
+	}
+	return strings.Compare(bs.Value, other.Value)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go
index 32771046379f7425bdbcb507627a820749c12931..6e072e70335e106c522d07923145880a2ecea1f1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go
@@ -32,7 +32,7 @@ func New() *InvertedIndex {
 }
 
 // Add a fingerprint under the specified labels.
-func (ii *InvertedIndex) Add(labels []client.LabelPair, fp model.Fingerprint) labels.Labels {
+func (ii *InvertedIndex) Add(labels []client.LabelAdapter, fp model.Fingerprint) labels.Labels {
 	shard := &ii.shards[util.HashFP(fp)%indexShards]
 	return shard.add(labels, fp)
 }
@@ -49,7 +49,6 @@ func (ii *InvertedIndex) Lookup(matchers []*labels.Matcher) []model.Fingerprint
 		result = append(result, fps...)
 	}
 
-	sort.Sort(fingerprints(result))
 	return result
 }
 
@@ -105,25 +104,31 @@ type indexShard struct {
 	pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(unlockIndex{})]byte
 }
 
+// copyString forces a fresh allocation of s, so the strings stored in the
+// index do not alias the buffer the labels were unmarshalled from.
+func copyString(s string) string {
+	return string([]byte(s))
+}
+
 // add metric to the index; return all the name/value pairs as strings from the index, sorted
-func (shard *indexShard) add(metric []client.LabelPair, fp model.Fingerprint) labels.Labels {
+func (shard *indexShard) add(metric []client.LabelAdapter, fp model.Fingerprint) labels.Labels {
 	shard.mtx.Lock()
 	defer shard.mtx.Unlock()
 
 	internedLabels := make(labels.Labels, len(metric))
 
 	for i, pair := range metric {
-		values, ok := shard.idx[string(pair.Name)]
+		values, ok := shard.idx[pair.Name]
 		if !ok {
 			values = indexEntry{
-				name: string(pair.Name),
+				name: copyString(pair.Name),
 				fps:  map[string]indexValueEntry{},
 			}
 			shard.idx[values.name] = values
 		}
-		fingerprints, ok := values.fps[string(pair.Value)]
+		fingerprints, ok := values.fps[pair.Value]
 		if !ok {
-			fingerprints = indexValueEntry{value: string(pair.Value)}
+			fingerprints = indexValueEntry{
+				value: copyString(pair.Value),
+			}
 		}
 		// Insert into the right position to keep fingerprints sorted
 		j := sort.Search(len(fingerprints.fps), func(i int) bool {
@@ -133,7 +138,7 @@ func (shard *indexShard) add(metric []client.LabelPair, fp model.Fingerprint) la
 		copy(fingerprints.fps[j+1:], fingerprints.fps[j:])
 		fingerprints.fps[j] = fp
 		values.fps[fingerprints.value] = fingerprints
-		internedLabels[i] = labels.Label{Name: string(values.name), Value: string(fingerprints.value)}
+		internedLabels[i] = labels.Label{Name: values.name, Value: fingerprints.value}
 	}
 	sort.Sort(internedLabels)
 	return internedLabels
@@ -162,7 +167,7 @@ func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint
 			// accumulate the matching fingerprints (which are all distinct)
 			// then sort to maintain the invariant
 			for value, fps := range values.fps {
-				if matcher.Matches(string(value)) {
+				if matcher.Matches(value) {
 					toIntersect = append(toIntersect, fps.fps...)
 				}
 			}
@@ -213,7 +218,7 @@ func (shard *indexShard) delete(labels labels.Labels, fp model.Fingerprint) {
 	defer shard.mtx.Unlock()
 
 	for _, pair := range labels {
-		name, value := string(pair.Name), string(pair.Value)
+		name, value := pair.Name, pair.Value
 		values, ok := shard.idx[name]
 		if !ok {
 			continue
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
index e67bc62a436d5cd973d5d00b5488b545ec3886b9..df54ce305a816d972f7335555b55396a9e1a941b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
@@ -3,18 +3,17 @@
 
 package ring
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import strconv "strconv"
-
-import strings "strings"
-import reflect "reflect"
-import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	io "io"
+	math "math"
+	reflect "reflect"
+	strconv "strconv"
+	strings "strings"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -42,6 +41,7 @@ var IngesterState_name = map[int32]string{
 	2: "PENDING",
 	3: "JOINING",
 }
+
 var IngesterState_value = map[string]int32{
 	"ACTIVE":  0,
 	"LEAVING": 1,
@@ -50,18 +50,18 @@ var IngesterState_value = map[string]int32{
 }
 
 func (IngesterState) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_ring_35bba6cb303d16e3, []int{0}
+	return fileDescriptor_7ebe6ffe1686e76b, []int{0}
 }
 
 type Desc struct {
-	Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
-	Tokens    []TokenDesc             `protobuf:"bytes,2,rep,name=tokens" json:"tokens"`
+	Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Tokens    []TokenDesc             `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens"`
 }
 
 func (m *Desc) Reset()      { *m = Desc{} }
 func (*Desc) ProtoMessage() {}
 func (*Desc) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ring_35bba6cb303d16e3, []int{0}
+	return fileDescriptor_7ebe6ffe1686e76b, []int{0}
 }
 func (m *Desc) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -78,8 +78,8 @@ func (m *Desc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *Desc) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Desc.Merge(dst, src)
+func (m *Desc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Desc.Merge(m, src)
 }
 func (m *Desc) XXX_Size() int {
 	return m.Size()
@@ -108,13 +108,13 @@ type IngesterDesc struct {
 	Addr      string        `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
 	Timestamp int64         `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
 	State     IngesterState `protobuf:"varint,3,opt,name=state,proto3,enum=ring.IngesterState" json:"state,omitempty"`
-	Tokens    []uint32      `protobuf:"varint,6,rep,packed,name=tokens" json:"tokens,omitempty"`
+	Tokens    []uint32      `protobuf:"varint,6,rep,packed,name=tokens,proto3" json:"tokens,omitempty"`
 }
 
 func (m *IngesterDesc) Reset()      { *m = IngesterDesc{} }
 func (*IngesterDesc) ProtoMessage() {}
 func (*IngesterDesc) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ring_35bba6cb303d16e3, []int{1}
+	return fileDescriptor_7ebe6ffe1686e76b, []int{1}
 }
 func (m *IngesterDesc) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -131,8 +131,8 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
 		return b[:n], nil
 	}
 }
-func (dst *IngesterDesc) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IngesterDesc.Merge(dst, src)
+func (m *IngesterDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IngesterDesc.Merge(m, src)
 }
 func (m *IngesterDesc) XXX_Size() int {
 	return m.Size()
@@ -179,7 +179,7 @@ type TokenDesc struct {
 func (m *TokenDesc) Reset()      { *m = TokenDesc{} }
 func (*TokenDesc) ProtoMessage() {}
 func (*TokenDesc) Descriptor() ([]byte, []int) {
-	return fileDescriptor_ring_35bba6cb303d16e3, []int{2}
+	return fileDescriptor_7ebe6ffe1686e76b, []int{2}
 }
 func (m *TokenDesc) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -196,8 +196,8 @@ func (m *TokenDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
 		return b[:n], nil
 	}
 }
-func (dst *TokenDesc) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TokenDesc.Merge(dst, src)
+func (m *TokenDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenDesc.Merge(m, src)
 }
 func (m *TokenDesc) XXX_Size() int {
 	return m.Size()
@@ -223,12 +223,49 @@ func (m *TokenDesc) GetIngester() string {
 }
 
 func init() {
+	proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value)
 	proto.RegisterType((*Desc)(nil), "ring.Desc")
 	proto.RegisterMapType((map[string]IngesterDesc)(nil), "ring.Desc.IngestersEntry")
 	proto.RegisterType((*IngesterDesc)(nil), "ring.IngesterDesc")
 	proto.RegisterType((*TokenDesc)(nil), "ring.TokenDesc")
-	proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value)
 }
+
+func init() {
+	proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_7ebe6ffe1686e76b)
+}
+
+var fileDescriptor_7ebe6ffe1686e76b = []byte{
+	// 440 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40,
+	0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9,
+	0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a,
+	0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09,
+	0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac,
+	0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44,
+	0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59,
+	0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1,
+	0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06,
+	0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c,
+	0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5,
+	0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf,
+	0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1,
+	0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb,
+	0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86,
+	0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2,
+	0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1,
+	0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 0x79,
+	0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e,
+	0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb,
+	0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d,
+	0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff,
+	0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c,
+	0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd,
+	0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1,
+	0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a,
+	0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff,
+	0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00,
+}
+
 func (x IngesterState) String() string {
 	s, ok := IngesterState_name[int32(x)]
 	if ok {
@@ -435,9 +472,9 @@ func (m *Desc) MarshalTo(dAtA []byte) (int, error) {
 			dAtA[i] = 0x12
 			i++
 			i = encodeVarintRing(dAtA, i, uint64((&v).Size()))
-			n1, err := (&v).MarshalTo(dAtA[i:])
-			if err != nil {
-				return 0, err
+			n1, err1 := (&v).MarshalTo(dAtA[i:])
+			if err1 != nil {
+				return 0, err1
 			}
 			i += n1
 		}
@@ -629,6 +666,11 @@ func (this *Desc) String() string {
 	if this == nil {
 		return "nil"
 	}
+	repeatedStringForTokens := "[]TokenDesc{"
+	for _, f := range this.Tokens {
+		repeatedStringForTokens += strings.Replace(strings.Replace(f.String(), "TokenDesc", "TokenDesc", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTokens += "}"
 	keysForIngesters := make([]string, 0, len(this.Ingesters))
 	for k, _ := range this.Ingesters {
 		keysForIngesters = append(keysForIngesters, k)
@@ -641,7 +683,7 @@ func (this *Desc) String() string {
 	mapStringForIngesters += "}"
 	s := strings.Join([]string{`&Desc{`,
 		`Ingesters:` + mapStringForIngesters + `,`,
-		`Tokens:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tokens), "TokenDesc", "TokenDesc", 1), `&`, ``, 1) + `,`,
+		`Tokens:` + repeatedStringForTokens + `,`,
 		`}`,
 	}, "")
 	return s
@@ -693,7 +735,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -721,7 +763,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -730,6 +772,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthRing
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRing
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -750,7 +795,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					wire |= (uint64(b) & 0x7F) << shift
+					wire |= uint64(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -767,7 +812,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						stringLenmapkey |= uint64(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -777,6 +822,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 						return ErrInvalidLengthRing
 					}
 					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthRing
+					}
 					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
@@ -793,7 +841,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						mapmsglen |= (int(b) & 0x7F) << shift
+						mapmsglen |= int(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -802,7 +850,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 						return ErrInvalidLengthRing
 					}
 					postmsgIndex := iNdEx + mapmsglen
-					if mapmsglen < 0 {
+					if postmsgIndex < 0 {
 						return ErrInvalidLengthRing
 					}
 					if postmsgIndex > l {
@@ -844,7 +892,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -853,6 +901,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthRing
 			}
 			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRing
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -870,6 +921,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthRing
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRing
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -897,7 +951,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -925,7 +979,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -935,6 +989,9 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthRing
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRing
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -954,7 +1011,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Timestamp |= (int64(b) & 0x7F) << shift
+				m.Timestamp |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -973,7 +1030,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.State |= (IngesterState(b) & 0x7F) << shift
+				m.State |= IngesterState(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -990,7 +1047,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					v |= (uint32(b) & 0x7F) << shift
+					v |= uint32(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1007,7 +1064,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					packedLen |= (int(b) & 0x7F) << shift
+					packedLen |= int(b&0x7F) << shift
 					if b < 0x80 {
 						break
 					}
@@ -1016,12 +1073,15 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 					return ErrInvalidLengthRing
 				}
 				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRing
+				}
 				if postIndex > l {
 					return io.ErrUnexpectedEOF
 				}
 				var elementCount int
 				var count int
-				for _, integer := range dAtA {
+				for _, integer := range dAtA[iNdEx:postIndex] {
 					if integer < 128 {
 						count++
 					}
@@ -1041,7 +1101,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 						}
 						b := dAtA[iNdEx]
 						iNdEx++
-						v |= (uint32(b) & 0x7F) << shift
+						v |= uint32(b&0x7F) << shift
 						if b < 0x80 {
 							break
 						}
@@ -1060,6 +1120,9 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthRing
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRing
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1087,7 +1150,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
 			}
 			b := dAtA[iNdEx]
 			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
+			wire |= uint64(b&0x7F) << shift
 			if b < 0x80 {
 				break
 			}
@@ -1115,7 +1178,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.Token |= (uint32(b) & 0x7F) << shift
+				m.Token |= uint32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1134,7 +1197,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -1144,6 +1207,9 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
 				return ErrInvalidLengthRing
 			}
 			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRing
+			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1158,6 +1224,9 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
 			if skippy < 0 {
 				return ErrInvalidLengthRing
 			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRing
+			}
 			if (iNdEx + skippy) > l {
 				return io.ErrUnexpectedEOF
 			}
@@ -1224,10 +1293,13 @@ func skipRing(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			iNdEx += length
 			if length < 0 {
 				return 0, ErrInvalidLengthRing
 			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRing
+			}
 			return iNdEx, nil
 		case 3:
 			for {
@@ -1256,6 +1328,9 @@ func skipRing(dAtA []byte) (n int, err error) {
 					return 0, err
 				}
 				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRing
+				}
 			}
 			return iNdEx, nil
 		case 4:
@@ -1274,39 +1349,3 @@ var (
 	ErrInvalidLengthRing = fmt.Errorf("proto: negative length found during unmarshaling")
 	ErrIntOverflowRing   = fmt.Errorf("proto: integer overflow")
 )
-
-func init() {
-	proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_ring_35bba6cb303d16e3)
-}
-
-var fileDescriptor_ring_35bba6cb303d16e3 = []byte{
-	// 440 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40,
-	0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9,
-	0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a,
-	0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09,
-	0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac,
-	0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44,
-	0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59,
-	0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1,
-	0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06,
-	0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c,
-	0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5,
-	0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf,
-	0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1,
-	0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb,
-	0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86,
-	0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2,
-	0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1,
-	0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 0x79,
-	0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e,
-	0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb,
-	0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d,
-	0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff,
-	0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c,
-	0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd,
-	0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1,
-	0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a,
-	0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff,
-	0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00,
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
index e170382f1834c07be9f7ee8a780d4704471582b2..8de926d83e2cc5e84608e45f39d5c7bf69e27954 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
@@ -8,16 +8,14 @@ import (
 	"github.com/prometheus/prometheus/pkg/labels"
 )
 
-var labelNameBytes = []byte(model.MetricNameLabel)
-
-// MetricNameFromLabelPairs extracts the metric name from a list of LabelPairs.
-func MetricNameFromLabelPairs(labels []client.LabelPair) ([]byte, error) {
+// MetricNameFromLabelAdapters extracts the metric name from a list of LabelAdapters.
+func MetricNameFromLabelAdapters(labels []client.LabelAdapter) (string, error) {
 	for _, label := range labels {
-		if label.Name.Equal(labelNameBytes) {
+		if label.Name == model.MetricNameLabel {
 			return label.Value, nil
 		}
 	}
-	return nil, fmt.Errorf("No metric name label")
+	return "", fmt.Errorf("No metric name label")
 }
 
 // MetricNameFromMetric extract the metric name from a model.Metric
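
For reference, a minimal usage sketch of the new string-based helper; the label values and the ingester client import path below are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/ingester/client"
	"github.com/cortexproject/cortex/pkg/util/extract"
	"github.com/prometheus/common/model"
)

func main() {
	// LabelAdapter carries plain string Name/Value fields, so no []byte conversions.
	ls := []client.LabelAdapter{
		{Name: model.MetricNameLabel, Value: "up"},
		{Name: "job", Value: "node"},
	}
	name, err := extract.MetricNameFromLabelAdapters(ls)
	fmt.Println(name, err) // "up", nil
}
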
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go
new file mode 100644
index 0000000000000000000000000000000000000000..d61c99b2028ec353d58f8c3983d7b9e0e0080967
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go
@@ -0,0 +1,26 @@
+package flagext
+
+import (
+	"flag"
+
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log/level"
+)
+
+type deprecatedFlag struct {
+	name string
+}
+
+func (deprecatedFlag) String() string {
+	return "deprecated"
+}
+
+func (d deprecatedFlag) Set(string) error {
+	level.Warn(util.Logger).Log("msg", "flag disabled", "flag", d.name)
+	return nil
+}
+
+// DeprecatedFlag registers a flag whose only effect is to log a warning when it is set.
+func DeprecatedFlag(f *flag.FlagSet, name, message string) {
+	f.Var(deprecatedFlag{name}, name, message)
+}
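
A short, hedged usage sketch of the new helper (the flag name is invented for illustration, and util.Logger is assumed to be initialised): an old command line still parses, and setting the flag only logs a warning.

package main

import (
	"flag"

	"github.com/cortexproject/cortex/pkg/util/flagext"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)

	// Hypothetical flag name; the registered value is flagext's deprecatedFlag,
	// whose Set only logs "flag disabled" and never returns an error.
	flagext.DeprecatedFlag(fs, "store.some-removed-option", "Deprecated; has no effect.")

	_ = fs.Parse([]string{"-store.some-removed-option=true"})
}
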
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go
index ba0a03801e2edade9d38041ab3f7750d1ff4d0f2..209b8b45c0646cc4beba8cffac9b27a3891b3a85 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go
@@ -9,6 +9,6 @@ import "github.com/prometheus/common/model"
 // function we use is prone to only change a few bits for similar metrics. We
 // really want to make use of every change in the fingerprint to vary mutex
 // selection.)
-func HashFP(fp model.Fingerprint) uint {
-	return uint(fp ^ (fp >> 32) ^ (fp >> 16))
+func HashFP(fp model.Fingerprint) uint32 {
+	return uint32(fp ^ (fp >> 32) ^ (fp >> 16))
 }
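
The uint32 return type fits the lock-striping use the comment above describes; a rough sketch, with an illustrative shard count and wrapper type that are not part of this change:

package main

import (
	"sync"

	"github.com/cortexproject/cortex/pkg/util"
	"github.com/prometheus/common/model"
)

const numShards = 128 // illustrative shard count

// fingerprintLocker stripes one mutex pool across fingerprints; HashFP's bit
// mixing spreads similar fingerprints over different shards.
type fingerprintLocker struct {
	mtxs [numShards]sync.Mutex
}

func (l *fingerprintLocker) Lock(fp model.Fingerprint)   { l.mtxs[util.HashFP(fp)%numShards].Lock() }
func (l *fingerprintLocker) Unlock(fp model.Fingerprint) { l.mtxs[util.HashFP(fp)%numShards].Unlock() }

func main() {
	var locker fingerprintLocker
	fp := model.Fingerprint(12345)
	locker.Lock(fp)
	defer locker.Unlock(fp)
	// ...mutate per-fingerprint state here...
}
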
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
index 6460e2dd147507e9eb5ff24bef1297ccba0dfde7..908378ab47415d33dedb0252fc3ef9af03cfb3fb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
@@ -44,8 +44,14 @@ func InitLogger(cfg *server.Config) {
 		panic(err)
 	}
 
-	Logger = l
-	cfg.Log = logging.GoKit(l)
+	// When using util.Logger, skip 3 stack frames to report the real caller.
+	Logger = log.With(l, "caller", log.Caller(3))
+
+	// cfg.Log wraps the log function, so skip 4 stack frames to get caller
+	// information. This works in Go 1.12 but not in earlier versions, which
+	// always show the compiler-generated wrapper function, marked
+	// <autogenerated>.
+	cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4)))
 }
 
 // PrometheusLogger exposes Prometheus counters for each of go-kit's log levels.
@@ -68,8 +74,8 @@ func NewPrometheusLogger(l logging.Level) (log.Logger, error) {
 		logger: logger,
 	}
 
-	// DefaultCaller must be the last wrapper
-	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+	// Return a Logger without caller information; it should not be used directly.
+	logger = log.With(logger, "ts", log.DefaultTimestampUTC)
 	return logger, nil
 }
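
For comparison, a small self-contained go-kit sketch (nothing Loki-specific): log.DefaultCaller is log.Caller(3) and is only correct when Log is called directly on the wrapped logger; each extra wrapper adds a stack frame, which is why the cfg.Log path above needs log.Caller(4).

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	l := log.NewLogfmtLogger(os.Stderr)

	// DefaultCaller (= Caller(3)) reports the file:line of this direct Log call.
	l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
	l.Log("msg", "caller points at this line")
}
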
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
index a2186c96a901bb4216c39b0bd301b55792e48c5c..0837b86ab1c3a900d59b78acb0de1d4aa3f766f3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
@@ -3,8 +3,6 @@ package validation
 import (
 	"flag"
 	"time"
-
-	"github.com/cortexproject/cortex/pkg/util/flagext"
 )
 
 // Limits describe all the limits for users; can be used to describe global default
@@ -28,8 +26,10 @@ type Limits struct {
 	MaxSeriesPerMetric int `yaml:"max_series_per_metric"`
 
 	// Querier enforced limits.
-	MaxChunksPerQuery int           `yaml:"max_chunks_per_query"`
-	MaxQueryLength    time.Duration `yaml:"max_query_length"`
+	MaxChunksPerQuery   int           `yaml:"max_chunks_per_query"`
+	MaxQueryLength      time.Duration `yaml:"max_query_length"`
+	MaxQueryParallelism int           `yaml:"max_query_parallelism"`
+	CardinalityLimit    int           `yaml:"cardinality_limit"`
 
 	// Config for overrides, convenient if it goes here.
 	PerTenantOverrideConfig string
@@ -55,6 +55,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 
 	f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.")
 	f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit to length of chunk store queries, 0 to disable.")
+	f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries that will be scheduled in parallel by the frontend.")
+	f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.")
 
 	f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides.")
 	f.DurationVar(&l.PerTenantOverridePeriod, "limits.per-user-override-period", 10*time.Second, "Period with this to reload the overrides.")
@@ -65,7 +67,7 @@ func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	// We want to set c to the defaults and then overwrite it with the input.
 	// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
 	// again, we have to hide it using a type indirection.  See prometheus/config.
-	flagext.DefaultValues(l)
+	*l = defaultLimits
 	type plain Limits
 	return unmarshal((*plain)(l))
 }
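
The *l = defaultLimits assignment relies on the usual set-defaults-then-hide-UnmarshalYAML pattern; a stripped-down sketch of that pattern with illustrative field names and defaults (not the real Limits struct):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Config struct {
	MaxQueryParallelism int `yaml:"max_query_parallelism"`
	CardinalityLimit    int `yaml:"cardinality_limit"`
}

// Stand-in for the package-level defaults captured elsewhere (see override.go).
var defaultConfig = Config{MaxQueryParallelism: 14, CardinalityLimit: 100000}

func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = defaultConfig // start from the captured defaults...
	type plain Config  // ...and strip the method set so unmarshal doesn't recurse
	return unmarshal((*plain)(c))
}

func main() {
	// Error handling elided for brevity.
	var c Config
	_ = yaml.Unmarshal([]byte("cardinality_limit: 200000\n"), &c)
	fmt.Println(c.MaxQueryParallelism, c.CardinalityLimit) // 14 200000
}
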
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go
index ce810e69e9b75fe0ae0b759eb6c3e327b0a9c28a..a2553f7de813ad565858d6a57ee3b946631f6010 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go
@@ -18,6 +18,12 @@ var overridesReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{
 	Help: "Whether the last overrides reload attempt was successful.",
 })
 
+// When we load YAML from disk, we want the various per-customer limits
+// to default to any values specified on the command line, not to the
+// flags' built-in defaults.  This global contains those values.  I (Tom)
+// cannot find a nicer way I'm afraid.
+var defaultLimits Limits
+
 // Overrides periodically fetch a set of per-user overrides, and provides convenience
 // functions for fetching the correct value.
 type Overrides struct {
@@ -28,7 +34,12 @@ type Overrides struct {
 }
 
 // NewOverrides makes a new Overrides.
+// We store the supplied limits in a global variable to ensure per-tenant limits
+// default to those values.  As such, the limits passed to the most recent call
+// to NewOverrides become the new global defaults.
 func NewOverrides(defaults Limits) (*Overrides, error) {
+	defaultLimits = defaults
+
 	if defaults.PerTenantOverrideConfig == "" {
 		level.Info(util.Logger).Log("msg", "per-tenant overides disabled")
 		return &Overrides{
@@ -242,9 +253,24 @@ func (o *Overrides) MaxQueryLength(userID string) time.Duration {
 	})
 }
 
+// MaxQueryParallelism returns the limit to the number of sub-queries the
+// frontend will process in parallel.
+func (o *Overrides) MaxQueryParallelism(userID string) int {
+	return o.getInt(userID, func(l *Limits) int {
+		return l.MaxQueryParallelism
+	})
+}
+
 // EnforceMetricName whether to enforce the presence of a metric name.
 func (o *Overrides) EnforceMetricName(userID string) bool {
 	return o.getBool(userID, func(l *Limits) bool {
 		return l.EnforceMetricName
 	})
 }
+
+// CardinalityLimit returns the cardinality limit for index queries.
+func (o *Overrides) CardinalityLimit(userID string) int {
+	return o.getInt(userID, func(l *Limits) int {
+		return l.CardinalityLimit
+	})
+}
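
Roughly how the pieces are expected to fit together at startup; this is a hedged sketch of the wiring, not code from this change:

package main

import (
	"flag"

	"github.com/cortexproject/cortex/pkg/util/validation"
)

func main() {
	var limits validation.Limits
	limits.RegisterFlags(flag.CommandLine)
	flag.Parse()

	// NewOverrides stashes the parsed limits in defaultLimits, so per-tenant
	// Limits later unmarshalled from the override file default to the
	// command-line values rather than to the compiled-in flag defaults.
	overrides, err := validation.NewOverrides(limits)
	if err != nil {
		panic(err)
	}
	_ = overrides
}
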
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
index 750ceb25b910c7541e3266e6b9ad3278ca90be8a..3aff0fa72d7a1aad7870b77c2c87b50f83a7669f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
@@ -22,6 +22,9 @@ const (
 	errTooOld            = "sample for '%s' has timestamp too old: %d"
 	errTooNew            = "sample for '%s' has timestamp too new: %d"
 
+	// ErrQueryTooLong is used in chunk store and query frontend.
+	ErrQueryTooLong = "invalid query, length > limit (%s > %s)"
+
 	greaterThanMaxSampleAge = "greater_than_max_sample_age"
 	maxLabelNamesPerSeries  = "max_label_names_per_series"
 	tooFarInFuture          = "too_far_in_future"
@@ -48,7 +51,7 @@ func init() {
 }
 
 // ValidateSample returns an err if the sample is invalid.
-func (cfg *Overrides) ValidateSample(userID string, metricName []byte, s client.Sample) error {
+func (cfg *Overrides) ValidateSample(userID string, metricName string, s client.Sample) error {
 	if cfg.RejectOldSamples(userID) && model.Time(s.TimestampMs) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) {
 		DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
 		return httpgrpc.Errorf(http.StatusBadRequest, errTooOld, metricName, model.Time(s.TimestampMs))
@@ -63,8 +66,8 @@ func (cfg *Overrides) ValidateSample(userID string, metricName []byte, s client.
 }
 
 // ValidateLabels returns an err if the labels are invalid.
-func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error {
-	metricName, err := extract.MetricNameFromLabelPairs(ls)
+func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelAdapter) error {
+	metricName, err := extract.MetricNameFromLabelAdapters(ls)
 	if cfg.EnforceMetricName(userID) {
 		if err != nil {
 			return httpgrpc.Errorf(http.StatusBadRequest, errMissingMetricName)
@@ -78,7 +81,7 @@ func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error
 	numLabelNames := len(ls)
 	if numLabelNames > cfg.MaxLabelNamesPerSeries(userID) {
 		DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
-		return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelPairs(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID))
+		return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelAdaptersToMetric(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID))
 	}
 
 	maxLabelNameLength := cfg.MaxLabelNameLength(userID)
@@ -102,7 +105,7 @@ func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error
 		}
 		if errTemplate != "" {
 			DiscardedSamples.WithLabelValues(reason, userID).Inc()
-			return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, client.FromLabelPairs(ls).String())
+			return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, client.FromLabelAdaptersToMetric(ls).String())
 		}
 	}
 	return nil