diff --git a/Gopkg.lock b/Gopkg.lock
index 28e587fe62e0a5ff17badb340b35581306c5a0f8..31d0ccbc7286f937b46d7bf877e86725cebf27fb 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -187,7 +187,7 @@
 [[projects]]
   branch = "master"
-  digest = "1:d9488f98e486896b56f406cfa36bf8f0b9f049bdfa6c67b28cc2b20328010adc"
+  digest = "1:2b1af99463a6c3c85f5f7716fb013bec568ff2187eab6e4658b06d73e2c41a8b"
   name = "github.com/cortexproject/cortex"
   packages = [
     "pkg/chunk",
@@ -213,7 +213,7 @@
     "pkg/util/validation",
   ]
   pruneopts = "UT"
-  revision = "be63a81445db6e9481a577a70ca0623ef6f97873"
+  revision = "88541de7a8aafbf501395ec767f059421c25d4c9"

 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
diff --git a/cmd/loki/loki-local-config.yaml b/cmd/loki/loki-local-config.yaml
index d4df0735003686710923ea8129f319fd4c7992ce..523a854bc48b2d88531702b9f8e23cf4d066e1bb 100644
--- a/cmd/loki/loki-local-config.yaml
+++ b/cmd/loki/loki-local-config.yaml
@@ -7,7 +7,8 @@ ingester:
   lifecycler:
     address: 127.0.0.1
     ring:
-      store: inmemory
+      kvstore:
+        store: inmemory
       replication_factor: 1
   chunk_idle_period: 15m
@@ -31,5 +32,5 @@ storage_config:
 limits_config:
   enforce_metric_name: false

-querier:
+chunk_store_config:
   max_look_back_period: 0
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index a87c29a158e7a07a7a48108a59053203d5a85477..2928029723bd2ad2bd16d35f19094831e1ff04e1 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -11,6 +11,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/weaveworks/common/user"

 	"github.com/cortexproject/cortex/pkg/chunk"
@@ -58,8 +59,8 @@ const (
 	// position, not wallclock time.
 	flushBackoff = 1 * time.Second

-	nameLabel = model.LabelName("__name__")
-	logsValue = model.LabelValue("logs")
+	nameLabel = "__name__"
+	logsValue = "logs"
 )

 // Flush triggers a flush of all the chunks and closes the flush queues.
@@ -254,8 +255,9 @@ func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelP
 		return err
 	}

-	metric := client.FromLabelAdaptersToMetric(labelPairs)
-	metric[nameLabel] = logsValue
+	labelsBuilder := labels.NewBuilder(client.FromLabelAdaptersToLabels(labelPairs))
+	labelsBuilder.Set(nameLabel, logsValue)
+	metric := labelsBuilder.Labels()

 	wireChunks := make([]chunk.Chunk, 0, len(cs))
 	for _, c := range cs {
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index 0b53c08e1cf38f000c41e0147fa7b2bd790b4ee8..601134807c063527f56e7986f46374b5477a2b4d 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/grafana/loki/pkg/chunkenc"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/stretchr/testify/require"
 	"github.com/weaveworks/common/user"
 	"golang.org/x/net/context"
@@ -67,13 +68,13 @@ func newTestStore(t require.TestingT, cfg Config) (*testStore, *Ingester) {

 // nolint
 func defaultIngesterTestConfig() Config {
-	consul := ring.NewInMemoryKVClient()
+	consul := ring.NewInMemoryKVClient(ring.ProtoCodec{Factory: ring.ProtoDescFactory})
 	cfg := Config{}
 	flagext.DefaultValues(&cfg)
 	cfg.FlushCheckPeriod = 99999 * time.Hour
 	cfg.MaxChunkIdle = 99999 * time.Hour
 	cfg.ConcurrentFlushes = 1
-	cfg.LifecyclerConfig.RingConfig.Mock = consul
+	cfg.LifecyclerConfig.RingConfig.KVStore.Mock = consul
 	cfg.LifecyclerConfig.NumTokens = 1
 	cfg.LifecyclerConfig.ListenPort = func(i int) *int { return &i }(0)
 	cfg.LifecyclerConfig.Addr = "localhost"
@@ -91,9 +92,9 @@ func (s *testStore) Put(ctx context.Context, chunks []chunk.Chunk) error {
 		return err
 	}
 	for _, chunk := range chunks {
-		for k, v := range chunk.Metric {
-			if v == "" {
-				return fmt.Errorf("Chunk has blank label %q", k)
+		for _, label := range chunk.Metric {
+			if label.Value == "" {
+				return fmt.Errorf("Chunk has blank label %q", label.Name)
 			}
 		}
 	}
@@ -157,7 +158,11 @@ func (s *testStore) checkData(t *testing.T, userIDs []string, testData map[strin
 		streams := []*logproto.Stream{}
 		for _, chunk := range chunks {
 			lokiChunk := chunk.Data.(*chunkenc.Facade).LokiChunk()
-			delete(chunk.Metric, nameLabel)
+			if chunk.Metric.Has("__name__") {
+				labelsBuilder := labels.NewBuilder(chunk.Metric)
+				labelsBuilder.Del("__name__")
+				chunk.Metric = labelsBuilder.Labels()
+			}
 			labels := chunk.Metric.String()
 			streams = append(streams, buildStreamsFromChunk(t, labels, lokiChunk))
 		}
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 516e68d2759592349dd1e72998ca129ee54bc253..d30000d76def5fde94d4b6b7bd9737bb223c1cc7 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -91,7 +91,7 @@ func New(cfg Config, store ChunkStore) (*Ingester, error) {
 	}

 	var err error
-	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i)
+	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester")
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index bdacb342236e73b8e2f86d2f9df6fea7c134612b..54940eaa1ed5b1b1d2ceb272bdc1543d59ae82b4 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -93,7 +93,7 @@ func (t *Loki) initServer() (err error) {
 }

 func (t *Loki) initRing() (err error) {
-	t.ring, err = ring.New(t.cfg.Ingester.LifecyclerConfig.RingConfig)
+	t.ring, err = ring.New(t.cfg.Ingester.LifecyclerConfig.RingConfig, "ingester")
 	if err != nil {
 		return
 	}
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index c7dbde62c3535e62946d1c508115e27a6f5b6be7..99a15e259485748917980222fbbe6f4b38e226fb 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -3,7 +3,6 @@ package querier

 import (
 	"context"
 	"flag"
-	"time"

 	"github.com/cortexproject/cortex/pkg/chunk"
 	cortex_client "github.com/cortexproject/cortex/pkg/ingester/client"
@@ -19,13 +18,10 @@ import (

 // Config for a querier.
 type Config struct {
-	// Limits query start time to be greater than now() - MaxLookBackPeriod, if set.
-	MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"`
 }

 // RegisterFlags register flags.
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	f.DurationVar(&cfg.MaxLookBackPeriod, "querier.max_look_back_period", 0, "Limit how long back data can be queried")
 }

 // Querier handlers queries.
@@ -96,13 +92,6 @@ func (q *Querier) forAllIngesters(f func(logproto.QuerierClient) (interface{}, e

 // Query does the heavy lifting for an actual query.
 func (q *Querier) Query(ctx context.Context, req *logproto.QueryRequest) (*logproto.QueryResponse, error) {
-	if q.cfg.MaxLookBackPeriod != 0 {
-		oldestStartTime := time.Now().Add(-q.cfg.MaxLookBackPeriod)
-		if oldestStartTime.After(req.Start) {
-			req.Start = oldestStartTime
-		}
-	}
-
 	ingesterIterators, err := q.queryIngesters(ctx, req)
 	if err != nil {
 		return nil, err
diff --git a/pkg/querier/store.go b/pkg/querier/store.go
index 6bb80fcf600e242b9f9222af4edf98bcd5a1dd1b..3c3edab5b1be74ee0e8f914be23ab8f2f7252c27 100644
--- a/pkg/querier/store.go
+++ b/pkg/querier/store.go
@@ -80,7 +80,7 @@ func filterSeriesByMatchers(chks map[model.Fingerprint][][]chunkenc.LazyChunk, m
 outer:
 	for fp, chunks := range chks {
 		for _, matcher := range matchers {
-			if !matcher.Matches(string(chunks[0][0].Chunk.Metric[model.LabelName(matcher.Name)])) {
+			if !matcher.Matches(chunks[0][0].Chunk.Metric.Get(matcher.Name)) {
 				delete(chks, fp)
 				continue outer
 			}
@@ -184,7 +184,11 @@ func partitionBySeriesChunks(chunks [][]chunk.Chunk, fetchers []*chunk.Fetcher)
 		for _, c := range chks {
 			fp := c.Fingerprint
 			chunksByFp[fp] = append(chunksByFp[fp], chunkenc.LazyChunk{Chunk: c, Fetcher: fetchers[i]})
-			delete(c.Metric, "__name__")
+			if c.Metric.Has("__name__") {
+				labelsBuilder := labels.NewBuilder(c.Metric)
+				labelsBuilder.Del("__name__")
+				c.Metric = labelsBuilder.Labels()
+			}
 		}
 	}
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 7e30a32917be27468a580469f8fc2b6e69fce4ac..1cbfff557c99c502b6a058967a9681746c15951c 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -26,7 +26,8 @@ config:
     chunk_block_size: 262144
     lifecycler:
       ring:
-        store: inmemory
+        kvstore:
+          store: inmemory
         replication_factor: 1

       ## Different ring configs can be used. E.g. Consul
@@ -56,7 +57,7 @@ config:
       directory: /data/loki/index
     filesystem:
       directory: /data/loki/chunks
-  querier:
+  chunk_store_config:
     max_look_back_period: 0

 deploymentStrategy: RollingUpdate
diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet
index bb49521922e679781d6c752d194baf98c6d6b141..8960bc4ebe3aaa33b107441dbd86c2c9d2b2d8cc 100644
--- a/production/ksonnet/loki/config.libsonnet
+++ b/production/ksonnet/loki/config.libsonnet
@@ -41,15 +41,16 @@
       lifecycler: {
         ring: {
-          store: 'consul',
           heartbeat_timeout: '1m',
           replication_factor: 3,
-
-          consul: {
-            host: 'consul.%s.svc.cluster.local:8500' % $._config.namespace,
-            prefix: '',
-            httpclienttimeout: '20s',
-            consistentreads: true,
+          kvstore: {
+            store: 'consul',
+            consul: {
+              host: 'consul.%s.svc.cluster.local:8500' % $._config.namespace,
+              prefix: '',
+              httpclienttimeout: '20s',
+              consistentreads: true,
+            },
           },
         },
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/bucket_client.go
new file mode 100644
index 0000000000000000000000000000000000000000..aabf1498ab2d5d6baa930908ee832e46362e19d1
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/bucket_client.go
@@ -0,0 +1,11 @@
+package chunk
+
+import (
+	"context"
+	"time"
+)
+
+// BucketClient is used to enforce retention on chunk buckets.
+type BucketClient interface {
+	DeleteChunksBefore(ctx context.Context, ts time.Time) error
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go
index ef13263e6c9c7bdf3e95cfc0cf06811f8ece4cf8..20194fbb927cd6c9a6d28db467518f4ba7500b87 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go
@@ -41,10 +41,10 @@ func Fixtures() ([]testutils.Fixture, error) {
 	}

 	cfg := Config{
-		addresses:         addresses,
-		keyspace:          "test",
-		consistency:       "QUORUM",
-		replicationFactor: 1,
+		Addresses:         addresses,
+		Keyspace:          "test",
+		Consistency:       "QUORUM",
+		ReplicationFactor: 1,
 	}

 	// Get a SchemaConfig with the defaults.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go
index a3b4f60691084ce0302379c5861a64147244f5f1..5d28a11b0bb9a936f91099c83ca3528ada8aa4dd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go
@@ -20,40 +20,40 @@ const (

 // Config for a StorageClient
 type Config struct {
-	addresses                string
-	port                     int
-	keyspace                 string
-	consistency              string
-	replicationFactor        int
-	disableInitialHostLookup bool
-	ssl                      bool
-	hostVerification         bool
-	caPath                   string
-	auth                     bool
-	username                 string
-	password                 string
-	timeout                  time.Duration
+	Addresses                string        `yaml:"addresses,omitempty"`
+	Port                     int           `yaml:"port,omitempty"`
+	Keyspace                 string        `yaml:"keyspace,omitempty"`
+	Consistency              string        `yaml:"consistency,omitempty"`
+	ReplicationFactor        int           `yaml:"replication_factor,omitempty"`
+	DisableInitialHostLookup bool          `yaml:"disable_initial_host_lookup,omitempty"`
+	SSL                      bool          `yaml:"SSL,omitempty"`
+	HostVerification         bool          `yaml:"host_verification,omitempty"`
+	CAPath                   string        `yaml:"CA_path,omitempty"`
+	Auth                     bool          `yaml:"auth,omitempty"`
+	Username                 string        `yaml:"username,omitempty"`
+	Password                 string        `yaml:"password,omitempty"`
+	Timeout                  time.Duration `yaml:"timeout,omitempty"`
 }

 // RegisterFlags adds the flags required to config this to the given FlagSet
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	f.StringVar(&cfg.addresses, "cassandra.addresses", "", "Comma-separated hostnames or ips of Cassandra instances.")
-	f.IntVar(&cfg.port, "cassandra.port", 9042, "Port that Cassandra is running on")
-	f.StringVar(&cfg.keyspace, "cassandra.keyspace", "", "Keyspace to use in Cassandra.")
-	f.StringVar(&cfg.consistency, "cassandra.consistency", "QUORUM", "Consistency level for Cassandra.")
-	f.IntVar(&cfg.replicationFactor, "cassandra.replication-factor", 1, "Replication factor to use in Cassandra.")
-	f.BoolVar(&cfg.disableInitialHostLookup, "cassandra.disable-initial-host-lookup", false, "Instruct the cassandra driver to not attempt to get host info from the system.peers table.")
-	f.BoolVar(&cfg.ssl, "cassandra.ssl", false, "Use SSL when connecting to cassandra instances.")
-	f.BoolVar(&cfg.hostVerification, "cassandra.host-verification", true, "Require SSL certificate validation.")
-	f.StringVar(&cfg.caPath, "cassandra.ca-path", "", "Path to certificate file to verify the peer.")
-	f.BoolVar(&cfg.auth, "cassandra.auth", false, "Enable password authentication when connecting to cassandra.")
-	f.StringVar(&cfg.username, "cassandra.username", "", "Username to use when connecting to cassandra.")
-	f.StringVar(&cfg.password, "cassandra.password", "", "Password to use when connecting to cassandra.")
-	f.DurationVar(&cfg.timeout, "cassandra.timeout", 600*time.Millisecond, "Timeout when connecting to cassandra.")
+	f.StringVar(&cfg.Addresses, "cassandra.addresses", "", "Comma-separated hostnames or IPs of Cassandra instances.")
+	f.IntVar(&cfg.Port, "cassandra.port", 9042, "Port that Cassandra is running on")
+	f.StringVar(&cfg.Keyspace, "cassandra.keyspace", "", "Keyspace to use in Cassandra.")
+	f.StringVar(&cfg.Consistency, "cassandra.consistency", "QUORUM", "Consistency level for Cassandra.")
+	f.IntVar(&cfg.ReplicationFactor, "cassandra.replication-factor", 1, "Replication factor to use in Cassandra.")
+	f.BoolVar(&cfg.DisableInitialHostLookup, "cassandra.disable-initial-host-lookup", false, "Instruct the cassandra driver to not attempt to get host info from the system.peers table.")
+	f.BoolVar(&cfg.SSL, "cassandra.ssl", false, "Use SSL when connecting to cassandra instances.")
+	f.BoolVar(&cfg.HostVerification, "cassandra.host-verification", true, "Require SSL certificate validation.")
+	f.StringVar(&cfg.CAPath, "cassandra.ca-path", "", "Path to certificate file to verify the peer.")
+	f.BoolVar(&cfg.Auth, "cassandra.auth", false, "Enable password authentication when connecting to cassandra.")
+	f.StringVar(&cfg.Username, "cassandra.username", "", "Username to use when connecting to cassandra.")
+	f.StringVar(&cfg.Password, "cassandra.password", "", "Password to use when connecting to cassandra.")
+	f.DurationVar(&cfg.Timeout, "cassandra.timeout", 600*time.Millisecond, "Timeout when connecting to cassandra.")
 }

 func (cfg *Config) session() (*gocql.Session, error) {
-	consistency, err := gocql.ParseConsistencyWrapper(cfg.consistency)
+	consistency, err := gocql.ParseConsistencyWrapper(cfg.Consistency)
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
@@ -62,13 +62,13 @@ func (cfg *Config) session() (*gocql.Session, error) {
 		return nil, errors.WithStack(err)
 	}

-	cluster := gocql.NewCluster(strings.Split(cfg.addresses, ",")...)
-	cluster.Port = cfg.port
-	cluster.Keyspace = cfg.keyspace
+	cluster := gocql.NewCluster(strings.Split(cfg.Addresses, ",")...)
+	cluster.Port = cfg.Port
+	cluster.Keyspace = cfg.Keyspace
 	cluster.Consistency = consistency
 	cluster.BatchObserver = observer{}
 	cluster.QueryObserver = observer{}
-	cluster.Timeout = cfg.timeout
+	cluster.Timeout = cfg.Timeout
 	cfg.setClusterConfig(cluster)

 	return cluster.CreateSession()
@@ -76,26 +76,26 @@ func (cfg *Config) session() (*gocql.Session, error) {

 // apply config settings to a cassandra ClusterConfig
 func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) {
-	cluster.DisableInitialHostLookup = cfg.disableInitialHostLookup
+	cluster.DisableInitialHostLookup = cfg.DisableInitialHostLookup

-	if cfg.ssl {
+	if cfg.SSL {
 		cluster.SslOpts = &gocql.SslOptions{
-			CaPath:                 cfg.caPath,
-			EnableHostVerification: cfg.hostVerification,
+			CaPath:                 cfg.CAPath,
+			EnableHostVerification: cfg.HostVerification,
 		}
 	}
-	if cfg.auth {
+	if cfg.Auth {
 		cluster.Authenticator = gocql.PasswordAuthenticator{
-			Username: cfg.username,
-			Password: cfg.password,
+			Username: cfg.Username,
+			Password: cfg.Password,
 		}
 	}
 }

 // createKeyspace will create the desired keyspace if it doesn't exist.
 func (cfg *Config) createKeyspace() error {
-	cluster := gocql.NewCluster(strings.Split(cfg.addresses, ",")...)
-	cluster.Port = cfg.port
+	cluster := gocql.NewCluster(strings.Split(cfg.Addresses, ",")...)
+	cluster.Port = cfg.Port
 	cluster.Keyspace = "system"
 	cluster.Timeout = 20 * time.Second
@@ -113,7 +113,7 @@ func (cfg *Config) createKeyspace() error {
 					'class' : 'SimpleStrategy',
 					'replication_factor' : %d
 				}`,
-		cfg.keyspace, cfg.replicationFactor)).Exec()
+		cfg.Keyspace, cfg.ReplicationFactor)).Exec()
 	return errors.WithStack(err)
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go
index e4335632ae45a606733813989ba8374970759510..48df881d557fb2616865f90ae10e07e8985893ff 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go
@@ -28,7 +28,7 @@ func NewTableClient(ctx context.Context, cfg Config) (chunk.TableClient, error)
 }

 func (c *tableClient) ListTables(ctx context.Context) ([]string, error) {
-	md, err := c.session.KeyspaceMetadata(c.cfg.keyspace)
+	md, err := c.session.KeyspaceMetadata(c.cfg.Keyspace)
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go
index 009fd8eb3f9e271164adc993dc663a11b52f822f..c69e1be2a18db6e6d886224e6eb87edb30c43560 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go
@@ -2,7 +2,6 @@ package chunk

 import (
 	"bytes"
-	"context"
 	"encoding/binary"
 	"fmt"
 	"hash/crc32"
@@ -14,12 +13,10 @@ import (
 	"github.com/cortexproject/cortex/pkg/prom1/storage/metric"
 	"github.com/golang/snappy"
 	jsoniter "github.com/json-iterator/go"
-	ot "github.com/opentracing/opentracing-go"
-	otlog "github.com/opentracing/opentracing-go/log"
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"

-	"github.com/cortexproject/cortex/pkg/util"
 	errs "github.com/weaveworks/common/errors"
 )

@@ -45,9 +42,9 @@ type Chunk struct {
 	UserID string `json:"userID"`

 	// These fields will be in all chunks, including old ones.
-	From    model.Time   `json:"from"`
-	Through model.Time   `json:"through"`
-	Metric  model.Metric `json:"metric"`
+	From    model.Time    `json:"from"`
+	Through model.Time    `json:"through"`
+	Metric  labels.Labels `json:"metric"`

 	// The hash is not written to the external storage either. We use
 	// crc32, Castagnoli table. See http://www.evanjones.ca/crc32c.html.
@@ -69,7 +66,7 @@ type Chunk struct {
 }

 // NewChunk creates a new chunk
-func NewChunk(userID string, fp model.Fingerprint, metric model.Metric, c prom_chunk.Chunk, from, through model.Time) Chunk {
+func NewChunk(userID string, fp model.Fingerprint, metric labels.Labels, c prom_chunk.Chunk, from, through model.Time) Chunk {
 	return Chunk{
 		Fingerprint: fp,
 		UserID:      userID,
@@ -337,37 +334,6 @@ func equalByKey(a, b Chunk) bool {
 		a.From == b.From && a.Through == b.Through && a.Checksum == b.Checksum
 }

-// ChunksToMatrix converts a set of chunks to a model.Matrix.
-func ChunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error) {
-	sp, ctx := ot.StartSpanFromContext(ctx, "chunksToMatrix")
-	defer sp.Finish()
-	sp.LogFields(otlog.Int("chunks", len(chunks)))
-
-	// Group chunks by series, sort and dedupe samples.
-	metrics := map[model.Fingerprint]model.Metric{}
-	samplesBySeries := map[model.Fingerprint][][]model.SamplePair{}
-	for _, c := range chunks {
-		ss, err := c.Samples(from, through)
-		if err != nil {
-			return nil, err
-		}
-
-		metrics[c.Fingerprint] = c.Metric
-		samplesBySeries[c.Fingerprint] = append(samplesBySeries[c.Fingerprint], ss)
-	}
-	sp.LogFields(otlog.Int("series", len(samplesBySeries)))
-
-	matrix := make(model.Matrix, 0, len(samplesBySeries))
-	for fp, ss := range samplesBySeries {
-		matrix = append(matrix, &model.SampleStream{
-			Metric: metrics[fp],
-			Values: util.MergeNSampleSets(ss...),
-		})
-	}
-
-	return matrix, nil
-}
-
 // Samples returns all SamplePairs for the chunk.
 func (c *Chunk) Samples(from, through model.Time) ([]model.SamplePair, error) {
 	it := c.Data.NewIterator()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
index 836c5d4872f9998f9b28d82ed4f88b2883cf52fe..7ae86fc3de42973e5e9eb1d696e74b4851382729 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
@@ -61,6 +61,9 @@ type StoreConfig struct {

 	MinChunkAge           time.Duration `yaml:"min_chunk_age,omitempty"`
 	CacheLookupsOlderThan time.Duration `yaml:"cache_lookups_older_than,omitempty"`
+
+	// Limits query start time to be greater than now() - MaxLookBackPeriod, if set.
+	MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"`
 }

 // RegisterFlags adds the flags required to config this to the given FlagSet
@@ -70,6 +73,7 @@ func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
 	f.DurationVar(&cfg.MinChunkAge, "store.min-chunk-age", 0, "Minimum time between chunk update and being saved to the store.")
 	f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 0 to disable.")
+	f.DurationVar(&cfg.MaxLookBackPeriod, "store.max-look-back-period", 0, "Limit how long back data can be queried")

 	// Deprecated.
 	flagext.DeprecatedFlag(f, "store.cardinality-cache-size", "DEPRECATED. Use store.index-cache-read.enable-fifocache and store.index-cache-read.fifocache.size instead.")
@@ -142,9 +146,9 @@ func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chun
 func (c *store) calculateIndexEntries(userID string, from, through model.Time, chunk Chunk) (WriteBatch, error) {
 	seenIndexEntries := map[string]struct{}{}

-	metricName, err := extract.MetricNameFromMetric(chunk.Metric)
-	if err != nil {
-		return nil, err
+	metricName := chunk.Metric.Get(labels.MetricName)
+	if metricName == "" {
+		return nil, fmt.Errorf("no MetricNameLabel for chunk")
 	}

 	entries, err := c.schema.GetWriteEntries(from, through, userID, metricName, chunk.Metric, chunk.ExternalKey())
@@ -173,7 +177,7 @@ func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers .
 	level.Debug(log).Log("from", from, "through", through, "matchers", len(allMatchers))

 	// Validate the query is within reasonable bounds.
-	metricName, matchers, shortcut, err := c.validateQuery(ctx, from, &through, allMatchers)
+	metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers)
 	if err != nil {
 		return nil, err
 	} else if shortcut {
@@ -199,14 +203,14 @@ func (c *store) LabelValuesForMetricName(ctx context.Context, from, through mode
 		return nil, err
 	}

-	shortcut, err := c.validateQueryTimeRange(ctx, from, &through)
+	shortcut, err := c.validateQueryTimeRange(ctx, &from, &through)
 	if err != nil {
 		return nil, err
 	} else if shortcut {
 		return nil, nil
 	}

-	queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, model.LabelValue(metricName), model.LabelName(labelName))
+	queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName)
 	if err != nil {
 		return nil, err
 	}
@@ -230,11 +234,11 @@ func (c *store) LabelValuesForMetricName(ctx context.Context, from, through mode
 	return result, nil
 }

-func (c *store) validateQueryTimeRange(ctx context.Context, from model.Time, through *model.Time) (bool, error) {
+func (c *store) validateQueryTimeRange(ctx context.Context, from *model.Time, through *model.Time) (bool, error) {
 	log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange")
 	defer log.Span.Finish()

-	if *through < from {
+	if *through < *from {
 		return false, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", through, from)
 	}

@@ -244,8 +248,8 @@ func (c *store) validateQueryTimeRange(ctx context.Context, from model.Time, thr
 	}

 	maxQueryLength := c.limits.MaxQueryLength(userID)
-	if maxQueryLength > 0 && (*through).Sub(from) > maxQueryLength {
-		return false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(from), maxQueryLength)
+	if maxQueryLength > 0 && (*through).Sub(*from) > maxQueryLength {
+		return false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(*from), maxQueryLength)
 	}

 	now := model.Now()
@@ -261,6 +265,13 @@ func (c *store) validateQueryTimeRange(ctx context.Context, from model.Time, thr
 		return true, nil
 	}

+	if c.cfg.MaxLookBackPeriod != 0 {
+		oldestStartTime := model.Now().Add(-c.cfg.MaxLookBackPeriod)
+		if oldestStartTime.After(*from) {
+			*from = oldestStartTime
+		}
+	}
+
 	if through.After(now.Add(5 * time.Minute)) { // time-span end is in future ... regard as legal
 		level.Error(log).Log("msg", "adjusting end timerange from future to now", "old_through", through, "new_through", now)
@@ -270,7 +281,7 @@ func (c *store) validateQueryTimeRange(ctx context.Context, from model.Time, thr
 	return false, nil
 }

-func (c *store) validateQuery(ctx context.Context, from model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) {
+func (c *store) validateQuery(ctx context.Context, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) {
 	log, ctx := spanlogger.New(ctx, "store.validateQuery")
 	defer log.Span.Finish()

@@ -342,7 +353,7 @@ func (c *store) lookupChunksByMetricName(ctx context.Context, from, through mode
 	// Just get chunks for metric if there are no matchers
 	if len(matchers) == 0 {
-		queries, err := c.schema.GetReadQueriesForMetric(from, through, userID, model.LabelValue(metricName))
+		queries, err := c.schema.GetReadQueriesForMetric(from, through, userID, metricName)
 		if err != nil {
 			return nil, err
 		}
@@ -372,9 +383,9 @@ func (c *store) lookupChunksByMetricName(ctx context.Context, from, through mode
 		var queries []IndexQuery
 		var err error
 		if matcher.Type != labels.MatchEqual {
-			queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, model.LabelValue(metricName), model.LabelName(matcher.Name))
+			queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name)
 		} else {
-			queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, model.LabelValue(metricName), model.LabelName(matcher.Name), model.LabelValue(matcher.Value))
+			queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value)
 		}
 		if err != nil {
 			incomingErrors <- err
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
index 28f323a4277499c2c3fc74b92b2f67c64999a576..c5e7cade69b63ce7dfa4645136ab930eb1966830 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
@@ -41,7 +41,7 @@ func filterChunksByMatchers(chunks []Chunk, filters []*labels.Matcher) []Chunk {
 outer:
 	for _, chunk := range chunks {
 		for _, filter := range filters {
-			if !filter.Matches(string(chunk.Metric[model.LabelName(filter.Name)])) {
+			if !filter.Matches(chunk.Metric.Get(filter.Name)) {
 				continue outer
 			}
 		}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go
index abc073279758ba19cc07506faca3431a77bff714..2e47dbf23a732df30fa5f908012c87a1e73d719d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go
@@ -1,30 +1,35 @@
 package chunk

+// Chunk functions used only in tests
+
 import (
+	"context"
 	"time"

+	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
 )

-// BenchmarkMetric is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated
-var BenchmarkMetric = model.Metric{
-	model.MetricNameLabel:              "container_cpu_usage_seconds_total",
-	"beta_kubernetes_io_arch":          "amd64",
-	"beta_kubernetes_io_instance_type": "c3.somesize",
-	"beta_kubernetes_io_os":            "linux",
-	"container_name":                   "some-name",
-	"cpu":                              "cpu01",
-	"failure_domain_beta_kubernetes_io_region": "somewhere-1",
-	"failure_domain_beta_kubernetes_io_zone":   "somewhere-1b",
-	"id":                     "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28",
-	"image":                  "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506",
-	"instance":               "ip-111-11-1-11.ec2.internal",
-	"job":                    "kubernetes-cadvisor",
-	"kubernetes_io_hostname": "ip-111-11-1-11",
-	"monitor":                "prod",
-	"name":                   "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0",
-	"namespace":              "kube-system",
-	"pod_name":               "some-other-name-5j8s8",
+// BenchmarkLabels is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated
+var BenchmarkLabels = labels.Labels{
+	{Name: model.MetricNameLabel, Value: "container_cpu_usage_seconds_total"},
+	{Name: "beta_kubernetes_io_arch", Value: "amd64"},
+	{Name: "beta_kubernetes_io_instance_type", Value: "c3.somesize"},
+	{Name: "beta_kubernetes_io_os", Value: "linux"},
+	{Name: "container_name", Value: "some-name"},
+	{Name: "cpu", Value: "cpu01"},
+	{Name: "failure_domain_beta_kubernetes_io_region", Value: "somewhere-1"},
+	{Name: "failure_domain_beta_kubernetes_io_zone", Value: "somewhere-1b"},
+	{Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"},
+	{Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"},
+	{Name: "instance", Value: "ip-111-11-1-11.ec2.internal"},
+	{Name: "job", Value: "kubernetes-cadvisor"},
+	{Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"},
+	{Name: "monitor", Value: "prod"},
+	{Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"},
+	{Name: "namespace", Value: "kube-system"},
+	{Name: "pod_name", Value: "some-other-name-5j8s8"},
 }

 // DefaultSchemaConfig creates a simple schema config for testing
@@ -45,3 +50,29 @@ func DefaultSchemaConfig(store, schema string, from model.Time) SchemaConfig {
 		}},
 	}
 }
+
+// ChunksToMatrix converts a set of chunks to a model.Matrix.
+func ChunksToMatrix(ctx context.Context, chunks []Chunk, from, through model.Time) (model.Matrix, error) {
+	// Group chunks by series, sort and dedupe samples.
+	metrics := map[model.Fingerprint]model.Metric{}
+	samplesBySeries := map[model.Fingerprint][][]model.SamplePair{}
+	for _, c := range chunks {
+		ss, err := c.Samples(from, through)
+		if err != nil {
+			return nil, err
+		}
+
+		metrics[c.Fingerprint] = util.LabelsToMetric(c.Metric)
+		samplesBySeries[c.Fingerprint] = append(samplesBySeries[c.Fingerprint], ss)
+	}
+
+	matrix := make(model.Matrix, 0, len(samplesBySeries))
+	for fp, ss := range samplesBySeries {
+		matrix = append(matrix, &model.SampleStream{
+			Metric: metrics[fp],
+			Values: util.MergeNSampleSets(ss...),
+		})
+	}
+
+	return matrix, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go
index fb0c576e17534674d7e0b227d098882bae2004b8..10ecb5c6f08b4bbb09acbed1b35b1d97ae8d531a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go
@@ -1,28 +1,53 @@
 package chunk

 import (
+	"sort"
 	"unsafe"

 	jsoniter "github.com/json-iterator/go"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
 )

 func init() {
-	jsoniter.RegisterTypeDecoderFunc("model.Metric", decodeMetric)
+	jsoniter.RegisterTypeDecoderFunc("labels.Labels", decodeLabels)
+	jsoniter.RegisterTypeEncoderFunc("labels.Labels", encodeLabels, labelsIsEmpty)
 	jsoniter.RegisterTypeDecoderFunc("model.Time", decodeModelTime)
 	jsoniter.RegisterTypeEncoderFunc("model.Time", encodeModelTime, modelTimeIsEmpty)
 }

-// decoding model.Metric via ReadMapCB is faster than the generic jsoniter
-// decoder because the latter allocates memory for each string via reflect.
-func decodeMetric(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
-	mapPtr := (*model.Metric)(ptr)
-	*mapPtr = make(model.Metric, 10)
+// Override Prometheus' labels.Labels decoder which goes via a map
+func decodeLabels(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+	labelsPtr := (*labels.Labels)(ptr)
+	*labelsPtr = make(labels.Labels, 0, 10)
 	iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool {
 		value := iter.ReadString()
-		(*mapPtr)[model.LabelName(key)] = model.LabelValue(value)
+		*labelsPtr = append(*labelsPtr, labels.Label{Name: key, Value: value})
 		return true
 	})
+	// Labels are always sorted, but earlier Cortex using a map would
+	// output in any order so we have to sort on read in
+	sort.Sort(*labelsPtr)
+}
+
+// Override Prometheus' labels.Labels encoder which goes via a map
+func encodeLabels(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+	labelsPtr := (*labels.Labels)(ptr)
+	stream.WriteObjectStart()
+	for i, v := range *labelsPtr {
+		if i != 0 {
+			stream.WriteMore()
+		}
+		stream.WriteString(v.Name)
+		stream.WriteRaw(`:`)
+		stream.WriteString(v.Value)
+	}
+	stream.WriteObjectEnd()
+}
+
+func labelsIsEmpty(ptr unsafe.Pointer) bool {
+	labelsPtr := (*labels.Labels)(ptr)
+	return len(*labelsPtr) == 0
 }

 // Decode via jsoniter's float64 routine is faster than getting the string data and decoding as two integers
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go
index 69ad4e6e1217d51d602489cf0a1ea260b6b33ef3..55f5416212ab72016f002dadedbc3a54fc68019a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go
@@ -8,18 +8,22 @@ import (
 	"os"
 	"path"
 	"sync"
+	"time"
"github.com/etcd-io/bbolt" + "github.com/go-kit/kit/log/level" "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" + "github.com/cortexproject/cortex/pkg/util" ) var bucketName = []byte("index") const ( - separator = "\000" - null = string('\xff') + separator = "\000" + null = string('\xff') + dbReloadPeriod = 10 * time.Minute ) // BoltDBConfig for a BoltDB index client. @@ -37,6 +41,8 @@ type boltIndexClient struct { dbsMtx sync.RWMutex dbs map[string]*bbolt.DB + done chan struct{} + wait sync.WaitGroup } // NewBoltDBIndexClient creates a new IndexClient that used BoltDB. @@ -45,18 +51,71 @@ func NewBoltDBIndexClient(cfg BoltDBConfig) (chunk.IndexClient, error) { return nil, err } - return &boltIndexClient{ - cfg: cfg, - dbs: map[string]*bbolt.DB{}, - }, nil + indexClient := &boltIndexClient{ + cfg: cfg, + dbs: map[string]*bbolt.DB{}, + done: make(chan struct{}), + } + + indexClient.wait.Add(1) + go indexClient.loop() + return indexClient, nil +} + +func (b *boltIndexClient) loop() { + defer b.wait.Done() + + ticker := time.NewTicker(dbReloadPeriod) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + b.reload() + case <-b.done: + return + } + } +} + +func (b *boltIndexClient) reload() { + b.dbsMtx.RLock() + + removedDBs := []string{} + for name := range b.dbs { + if _, err := os.Stat(path.Join(b.cfg.Directory, name)); err != nil && os.IsNotExist(err) { + removedDBs = append(removedDBs, name) + level.Debug(util.Logger).Log("msg", "boltdb file got removed", "filename", name) + continue + } + } + b.dbsMtx.RUnlock() + + if len(removedDBs) != 0 { + b.dbsMtx.Lock() + defer b.dbsMtx.Unlock() + + for _, name := range removedDBs { + if err := b.dbs[name].Close(); err != nil { + level.Error(util.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err) + continue + } + delete(b.dbs, name) + } + } + } func (b *boltIndexClient) Stop() { + close(b.done) + b.dbsMtx.Lock() defer b.dbsMtx.Unlock() for _, db := range b.dbs { db.Close() } + + b.wait.Wait() } func (b *boltIndexClient) NewWriteBatch() chunk.WriteBatch { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go index 72c568772fa41c7afd6e6c60f7b8353e949025e7..4c3659a429e905de0fd50cacc86b582d3765d70f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go @@ -2,19 +2,37 @@ package local import ( "context" + "os" + "path/filepath" "github.com/cortexproject/cortex/pkg/chunk" ) -type tableClient struct{} +type tableClient struct { + directory string +} // NewTableClient returns a new TableClient. 
-func NewTableClient() (chunk.TableClient, error) {
-	return &tableClient{}, nil
+func NewTableClient(directory string) (chunk.TableClient, error) {
+	return &tableClient{directory: directory}, nil
 }

 func (c *tableClient) ListTables(ctx context.Context) ([]string, error) {
-	return nil, nil
+	boltDbFiles := []string{}
+	err := filepath.Walk(c.directory, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			boltDbFiles = append(boltDbFiles, info.Name())
+		}
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return boltDbFiles, nil
 }

 func (c *tableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error {
@@ -22,7 +40,7 @@ func (c *tableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) err
 }

 func (c *tableClient) DeleteTable(ctx context.Context, name string) error {
-	return nil
+	return os.Remove(filepath.Join(c.directory, name))
 }

 func (c *tableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go
index 323a51ae8c130f8327284ccbae7df8b1c5eb6ec9..cc7673cab95410f64dcdff25c15034d00c782c46 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go
@@ -43,7 +43,7 @@ func (f *fixture) Clients() (
 		return
 	}

-	tableClient, err = NewTableClient()
+	tableClient, err = NewTableClient(f.dirname)
 	if err != nil {
 		return
 	}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go
index 164e877b8628ec42badfa2b1df7eef080c45352b..3d58753a9ba3cc33e00679a0ed34a7a0161820d9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go
@@ -5,13 +5,19 @@ import (
 	"encoding/base64"
 	"flag"
 	"io/ioutil"
+	"os"
 	"path"
+	"path/filepath"
+	"time"
+
+	"github.com/go-kit/kit/log/level"

 	"github.com/cortexproject/cortex/pkg/chunk"
 	"github.com/cortexproject/cortex/pkg/chunk/util"
+	pkgUtil "github.com/cortexproject/cortex/pkg/util"
 )

-// FSConfig is the config for a fsObjectClient.
+// FSConfig is the config for a FSObjectClient.
 type FSConfig struct {
 	Directory string `yaml:"directory"`
 }

@@ -21,24 +27,27 @@ func (cfg *FSConfig) RegisterFlags(f *flag.FlagSet) {
 	f.StringVar(&cfg.Directory, "local.chunk-directory", "", "Directory to store chunks in.")
 }

-type fsObjectClient struct {
+// FSObjectClient holds config for filesystem as object store
+type FSObjectClient struct {
 	cfg FSConfig
 }

 // NewFSObjectClient makes a chunk.ObjectClient which stores chunks as files in the local filesystem.
-func NewFSObjectClient(cfg FSConfig) (chunk.ObjectClient, error) {
+func NewFSObjectClient(cfg FSConfig) (*FSObjectClient, error) {
 	if err := ensureDirectory(cfg.Directory); err != nil {
 		return nil, err
 	}

-	return &fsObjectClient{
+	return &FSObjectClient{
 		cfg: cfg,
 	}, nil
 }

-func (fsObjectClient) Stop() {}
+// Stop implements ObjectClient
+func (FSObjectClient) Stop() {}

-func (f *fsObjectClient) PutChunks(_ context.Context, chunks []chunk.Chunk) error {
+// PutChunks implements ObjectClient
+func (f *FSObjectClient) PutChunks(_ context.Context, chunks []chunk.Chunk) error {
 	for i := range chunks {
 		buf, err := chunks[i].Encoded()
 		if err != nil {
@@ -53,11 +62,12 @@ func (f *fsObjectClient) PutChunks(_ context.Context, chunks []chunk.Chunk) erro
 	return nil
 }

-func (f *fsObjectClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) {
+// GetChunks implements ObjectClient
+func (f *FSObjectClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) {
 	return util.GetParallelChunks(ctx, chunks, f.getChunk)
 }

-func (f *fsObjectClient) getChunk(_ context.Context, decodeContext *chunk.DecodeContext, c chunk.Chunk) (chunk.Chunk, error) {
+func (f *FSObjectClient) getChunk(_ context.Context, decodeContext *chunk.DecodeContext, c chunk.Chunk) (chunk.Chunk, error) {
 	filename := base64.StdEncoding.EncodeToString([]byte(c.ExternalKey()))
 	buf, err := ioutil.ReadFile(path.Join(f.cfg.Directory, filename))
 	if err != nil {
@@ -70,3 +80,16 @@ func (f *fsObjectClient) getChunk(_ context.Context, decodeContext *chunk.Decode

 	return c, nil
 }
+
+// DeleteChunksBefore implements BucketClient
+func (f *FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) error {
+	return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error {
+		if !info.IsDir() && info.ModTime().Before(ts) {
+			level.Info(pkgUtil.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name())
+			if err := os.Remove(path); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
index 307f4c1d60303bbdcc889d6678086aa4df4e7d7d..0e489b8b41a03bc224d20b6d92fd960862d0d8ca 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
@@ -7,6 +7,7 @@ import (
 	"strings"

 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
 )

 var (
@@ -29,17 +30,17 @@ var (
 // to write or read chunks from the external index.
 type Schema interface {
 	// When doing a write, use this method to return the list of entries you should write to.
-	GetWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error)
+	GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error)

 	// Should only be used with the seriesStore. TODO: Make seriesStore implement a different interface altogether.
-	GetLabelWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error)
-	GetChunkWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error)
-	GetLabelEntryCacheKeys(from, through model.Time, userID string, labels model.Metric) []string
+	GetLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error)
+	GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error)
+	GetLabelEntryCacheKeys(from, through model.Time, userID string, labels labels.Labels) []string

 	// When doing a read, use these methods to return the list of entries you should query
-	GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error)
-	GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error)
-	GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error)
+	GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error)
+	GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error)
+	GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error)

 	// If the query resulted in series IDs, use this method to find chunks.
GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) @@ -82,7 +83,7 @@ type schema struct { entries entries } -func (s schema) GetWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (s schema) GetWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { var result []IndexEntry for _, bucket := range s.buckets(from, through, userID) { @@ -95,7 +96,7 @@ func (s schema) GetWriteEntries(from, through model.Time, userID string, metricN return result, nil } -func (s schema) GetLabelWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (s schema) GetLabelWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { var result []IndexEntry for _, bucket := range s.buckets(from, through, userID) { @@ -108,7 +109,7 @@ func (s schema) GetLabelWriteEntries(from, through model.Time, userID string, me return result, nil } -func (s schema) GetChunkWriteEntries(from, through model.Time, userID string, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (s schema) GetChunkWriteEntries(from, through model.Time, userID string, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { var result []IndexEntry for _, bucket := range s.buckets(from, through, userID) { @@ -123,7 +124,7 @@ func (s schema) GetChunkWriteEntries(from, through model.Time, userID string, me } // Should only used for v9Schema -func (s schema) GetLabelEntryCacheKeys(from, through model.Time, userID string, labels model.Metric) []string { +func (s schema) GetLabelEntryCacheKeys(from, through model.Time, userID string, labels labels.Labels) []string { var result []string for _, bucket := range s.buckets(from, through, userID) { key := strings.Join([]string{ @@ -140,7 +141,7 @@ func (s schema) GetLabelEntryCacheKeys(from, through model.Time, userID string, return result } -func (s schema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error) { +func (s schema) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -154,7 +155,7 @@ func (s schema) GetReadQueriesForMetric(from, through model.Time, userID string, return result, nil } -func (s schema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (s schema) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) { var result []IndexQuery buckets := s.buckets(from, through, userID) @@ -168,7 +169,7 @@ func (s schema) GetReadQueriesForMetricLabel(from, through model.Time, userID st return result, nil } -func (s schema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { +func (s schema) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { var result 
[]IndexQuery buckets := s.buckets(from, through, userID) @@ -197,13 +198,13 @@ func (s schema) GetChunksForSeries(from, through model.Time, userID string, seri } type entries interface { - GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) - GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) - GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) + GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) + GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) + GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) - GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) - GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) - GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) + GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) + GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) + GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) GetChunksForSeries(bucket Bucket, seriesID []byte) ([]IndexQuery, error) } @@ -213,60 +214,60 @@ type entries interface { type originalEntries struct{} -func (originalEntries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (originalEntries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { chunkIDBytes := []byte(chunkID) result := []IndexEntry{} - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } - if strings.ContainsRune(string(value), '\x00') { + if strings.ContainsRune(string(v.Value), '\x00') { return nil, fmt.Errorf("label values cannot contain null byte") } result = append(result, IndexEntry{ TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), - RangeValue: encodeRangeKey([]byte(key), []byte(value), chunkIDBytes), + HashValue: bucket.hashKey + ":" + metricName, + RangeValue: encodeRangeKey([]byte(v.Name), []byte(v.Value), chunkIDBytes), }) } return result, nil } -func (originalEntries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (originalEntries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (originalEntries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (originalEntries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (originalEntries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { +func (originalEntries) GetReadMetricQueries(bucket Bucket, 
metricName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValuePrefix: nil, }, }, nil } -func (originalEntries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (originalEntries) GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValuePrefix: encodeRangeKey([]byte(labelName)), }, }, nil } -func (originalEntries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { +func (originalEntries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { if strings.ContainsRune(string(labelValue), '\x00') { return nil, fmt.Errorf("label values cannot contain null byte") } return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValuePrefix: encodeRangeKey([]byte(labelName), []byte(labelValue)), }, }, nil @@ -283,37 +284,37 @@ type base64Entries struct { originalEntries } -func (base64Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (base64Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { chunkIDBytes := []byte(chunkID) result := []IndexEntry{} - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } - encodedBytes := encodeBase64Value(value) + encodedBytes := encodeBase64Value(v.Value) result = append(result, IndexEntry{ TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), - RangeValue: encodeRangeKey([]byte(key), encodedBytes, chunkIDBytes, chunkTimeRangeKeyV1), + HashValue: bucket.hashKey + ":" + metricName, + RangeValue: encodeRangeKey([]byte(v.Name), encodedBytes, chunkIDBytes, chunkTimeRangeKeyV1), }) } return result, nil } -func (base64Entries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (base64Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (base64Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (base64Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (base64Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { +func (base64Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { encodedBytes := encodeBase64Value(labelValue) return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + 
HashValue: bucket.hashKey + ":" + metricName, RangeValuePrefix: encodeRangeKey([]byte(labelName), encodedBytes), }, }, nil @@ -326,24 +327,24 @@ func (base64Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName mo // - range key: \0\0<chunk name>\0<version 3> type labelNameInHashKeyEntries struct{} -func (labelNameInHashKeyEntries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (labelNameInHashKeyEntries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { chunkIDBytes := []byte(chunkID) entries := []IndexEntry{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValue: encodeRangeKey(nil, nil, chunkIDBytes, chunkTimeRangeKeyV2), }, } - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } - encodedBytes := encodeBase64Value(value) + encodedBytes := encodeBase64Value(v.Value) entries = append(entries, IndexEntry{ TableName: bucket.tableName, - HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, key), + HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, v.Name), RangeValue: encodeRangeKey(nil, encodedBytes, chunkIDBytes, chunkTimeRangeKeyV1), }) } @@ -351,23 +352,23 @@ func (labelNameInHashKeyEntries) GetWriteEntries(bucket Bucket, metricName model return entries, nil } -func (labelNameInHashKeyEntries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (labelNameInHashKeyEntries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (labelNameInHashKeyEntries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (labelNameInHashKeyEntries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (labelNameInHashKeyEntries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { +func (labelNameInHashKeyEntries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, }, }, nil } -func (labelNameInHashKeyEntries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (labelNameInHashKeyEntries) GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, @@ -376,7 +377,7 @@ func (labelNameInHashKeyEntries) GetReadMetricLabelQueries(bucket Bucket, metric }, nil } -func (labelNameInHashKeyEntries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { +func (labelNameInHashKeyEntries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { encodedBytes := encodeBase64Value(labelValue) return []IndexQuery{ { @@ -396,26 +397,26 @@ func 
(labelNameInHashKeyEntries) GetChunksForSeries(_ Bucket, _ []byte) ([]Index // so the chunk end times are ignored. type v5Entries struct{} -func (v5Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v5Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { chunkIDBytes := []byte(chunkID) encodedThroughBytes := encodeTime(bucket.through) entries := []IndexEntry{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValue: encodeRangeKey(encodedThroughBytes, nil, chunkIDBytes, chunkTimeRangeKeyV3), }, } - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } - encodedValueBytes := encodeBase64Value(value) + encodedValueBytes := encodeBase64Value(v.Value) entries = append(entries, IndexEntry{ TableName: bucket.tableName, - HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, key), + HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, v.Name), RangeValue: encodeRangeKey(encodedThroughBytes, encodedValueBytes, chunkIDBytes, chunkTimeRangeKeyV4), }) } @@ -423,23 +424,23 @@ func (v5Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, lab return entries, nil } -func (v5Entries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v5Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (v5Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v5Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (v5Entries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { +func (v5Entries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, }, }, nil } -func (v5Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (v5Entries) GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, @@ -448,7 +449,7 @@ func (v5Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.Label }, nil } -func (v5Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, _ model.LabelValue) ([]IndexQuery, error) { +func (v5Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, _ string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, @@ -465,52 +466,52 @@ func (v5Entries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { // moves label value out of range key (see #199). 
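
A minimal runnable sketch (not part of the vendored diff) of the pattern the hunks above apply throughout the schema code: model.Metric is a map[model.LabelName]model.LabelValue, while labels.Labels is a name-sorted []labels.Label of plain strings, so every `for key, value := range labels` becomes a deterministic slice walk with no string()/model.LabelValue casts.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
)

func main() {
	// labels.Labels must be sorted by name; values here are illustrative only.
	lbls := labels.Labels{
		{Name: model.MetricNameLabel, Value: "logs"}, // "__name__"
		{Name: "job", Value: "varlogs"},
	}
	for _, l := range lbls {
		if l.Name == model.MetricNameLabel {
			continue // schema entries skip the metric name, as in the hunks above
		}
		fmt.Println(l.Name, l.Value) // Name and Value are already strings
	}
}
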
type v6Entries struct{} -func (v6Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v6Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { chunkIDBytes := []byte(chunkID) encodedThroughBytes := encodeTime(bucket.through) entries := []IndexEntry{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValue: encodeRangeKey(encodedThroughBytes, nil, chunkIDBytes, chunkTimeRangeKeyV3), }, } - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } entries = append(entries, IndexEntry{ TableName: bucket.tableName, - HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, key), + HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, v.Name), RangeValue: encodeRangeKey(encodedThroughBytes, nil, chunkIDBytes, chunkTimeRangeKeyV5), - Value: []byte(value), + Value: []byte(v.Value), }) } return entries, nil } -func (v6Entries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v6Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (v6Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v6Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (v6Entries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { +func (v6Entries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { encodedFromBytes := encodeTime(bucket.from) return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValueStart: encodeRangeKey(encodedFromBytes), }, }, nil } -func (v6Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (v6Entries) GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) { encodedFromBytes := encodeTime(bucket.from) return []IndexQuery{ { @@ -521,7 +522,7 @@ func (v6Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.Label }, nil } -func (v6Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { +func (v6Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { encodedFromBytes := encodeTime(bucket.from) return []IndexQuery{ { @@ -541,41 +542,41 @@ func (v6Entries) GetChunksForSeries(_ Bucket, _ []byte) ([]IndexQuery, error) { type v9Entries struct { } -func (v9Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v9Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (v9Entries) GetLabelWriteEntries(bucket Bucket, metricName 
model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v9Entries) GetLabelWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { seriesID := sha256bytes(labels.String()) entries := []IndexEntry{ // Entry for metricName -> seriesID { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, RangeValue: encodeRangeKey(seriesID, nil, nil, seriesRangeKeyV1), }, } // Entries for metricName:labelName -> hash(value):seriesID // We use a hash of the value to limit its length. - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } - valueHash := sha256bytes(string(value)) + valueHash := sha256bytes(v.Value) entries = append(entries, IndexEntry{ TableName: bucket.tableName, - HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, key), + HashValue: fmt.Sprintf("%s:%s:%s", bucket.hashKey, metricName, v.Name), RangeValue: encodeRangeKey(valueHash, seriesID, nil, labelSeriesRangeKeyV1), - Value: []byte(value), + Value: []byte(v.Value), }) } return entries, nil } -func (v9Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v9Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { seriesID := sha256bytes(labels.String()) encodedThroughBytes := encodeTime(bucket.through) @@ -591,16 +592,16 @@ func (v9Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue return entries, nil } -func (v9Entries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { +func (v9Entries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, - HashValue: bucket.hashKey + ":" + string(metricName), + HashValue: bucket.hashKey + ":" + metricName, }, }, nil } -func (v9Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (v9Entries) GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) { return []IndexQuery{ { TableName: bucket.tableName, @@ -609,8 +610,8 @@ func (v9Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.Label }, nil } -func (v9Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { - valueHash := sha256bytes(string(labelValue)) +func (v9Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { + valueHash := sha256bytes(labelValue) return []IndexQuery{ { TableName: bucket.tableName, @@ -637,11 +638,11 @@ type v10Entries struct { rowShards uint32 } -func (v10Entries) GetWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v10Entries) GetWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { return nil, ErrNotSupported } -func (s v10Entries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (s v10Entries) GetLabelWriteEntries(bucket Bucket, metricName 
string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { seriesID := sha256bytes(labels.String()) // read first 32 bits of the hash and use this to calculate the shard @@ -651,30 +652,30 @@ func (s v10Entries) GetLabelWriteEntries(bucket Bucket, metricName model.LabelVa // Entry for metricName -> seriesID { TableName: bucket.tableName, - HashValue: fmt.Sprintf("%02d:%s:%s", shard, bucket.hashKey, string(metricName)), + HashValue: fmt.Sprintf("%02d:%s:%s", shard, bucket.hashKey, metricName), RangeValue: encodeRangeKey(seriesID, nil, nil, seriesRangeKeyV1), }, } // Entries for metricName:labelName -> hash(value):seriesID // We use a hash of the value to limit its length. - for key, value := range labels { - if key == model.MetricNameLabel { + for _, v := range labels { + if v.Name == model.MetricNameLabel { continue } - valueHash := sha256bytes(string(value)) + valueHash := sha256bytes(v.Value) entries = append(entries, IndexEntry{ TableName: bucket.tableName, - HashValue: fmt.Sprintf("%02d:%s:%s:%s", shard, bucket.hashKey, metricName, key), + HashValue: fmt.Sprintf("%02d:%s:%s:%s", shard, bucket.hashKey, metricName, v.Name), RangeValue: encodeRangeKey(valueHash, seriesID, nil, labelSeriesRangeKeyV1), - Value: []byte(value), + Value: []byte(v.Value), }) } return entries, nil } -func (v10Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValue, labels model.Metric, chunkID string) ([]IndexEntry, error) { +func (v10Entries) GetChunkWriteEntries(bucket Bucket, metricName string, labels labels.Labels, chunkID string) ([]IndexEntry, error) { seriesID := sha256bytes(labels.String()) encodedThroughBytes := encodeTime(bucket.through) @@ -690,18 +691,18 @@ func (v10Entries) GetChunkWriteEntries(bucket Bucket, metricName model.LabelValu return entries, nil } -func (s v10Entries) GetReadMetricQueries(bucket Bucket, metricName model.LabelValue) ([]IndexQuery, error) { +func (s v10Entries) GetReadMetricQueries(bucket Bucket, metricName string) ([]IndexQuery, error) { result := make([]IndexQuery, 0, s.rowShards) for i := uint32(0); i < s.rowShards; i++ { result = append(result, IndexQuery{ TableName: bucket.tableName, - HashValue: fmt.Sprintf("%02d:%s:%s", i, bucket.hashKey, string(metricName)), + HashValue: fmt.Sprintf("%02d:%s:%s", i, bucket.hashKey, metricName), }) } return result, nil } -func (s v10Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (s v10Entries) GetReadMetricLabelQueries(bucket Bucket, metricName string, labelName string) ([]IndexQuery, error) { result := make([]IndexQuery, 0, s.rowShards) for i := uint32(0); i < s.rowShards; i++ { result = append(result, IndexQuery{ @@ -712,8 +713,8 @@ func (s v10Entries) GetReadMetricLabelQueries(bucket Bucket, metricName model.La return result, nil } -func (s v10Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { - valueHash := sha256bytes(string(labelValue)) +func (s v10Entries) GetReadMetricLabelValueQueries(bucket Bucket, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { + valueHash := sha256bytes(labelValue) result := make([]IndexQuery, 0, s.rowShards) for i := uint32(0); i < s.rowShards; i++ { result = append(result, IndexQuery{ diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go index 
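
A hedged sketch of the v10 row sharding described by the "read first 32 bits of the hash" comment above; the body of sha256bytes is an assumption (base64-encoded SHA-256, consistent with the encodeBase64Value change in schema_util.go below), not code from this diff.

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// sha256bytes: assumed implementation returning the base64-encoded SHA-256.
func sha256bytes(s string) []byte {
	h := sha256.Sum256([]byte(s))
	encoded := make([]byte, base64.RawStdEncoding.EncodedLen(len(h)))
	base64.RawStdEncoding.Encode(encoded, h[:])
	return encoded
}

func main() {
	const rowShards = 16 // stands in for s.rowShards
	seriesID := sha256bytes(`{job="varlogs"}`)
	// The first 32 bits of the series ID pick the shard that prefixes the
	// hash key, matching the "%02d:%s:%s" format used above.
	shard := binary.BigEndian.Uint32(seriesID) % rowShards
	fmt.Printf("%02d:%s:%s\n", shard, "userid:d18500", "foo") // hypothetical hashKey/metricName
}
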
2444b0c458df5ee26492ae3516f376ae97414a5f..c20e74968efb6baf38ffd44c0851b3d63eb454eb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go @@ -13,7 +13,7 @@ type schemaCaching struct { cacheOlderThan time.Duration } -func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error) { +func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) cacheableQueries, err := s.Schema.GetReadQueriesForMetric(cFrom, cThrough, userID, metricName) @@ -29,7 +29,7 @@ func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil } -func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { +func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) { cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) cacheableQueries, err := s.Schema.GetReadQueriesForMetricLabel(cFrom, cThrough, userID, metricName, labelName) @@ -45,7 +45,7 @@ func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, u return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil } -func (s *schemaCaching) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { +func (s *schemaCaching) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) cacheableQueries, err := s.Schema.GetReadQueriesForMetricLabelValue(cFrom, cThrough, userID, metricName, labelName, labelValue) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go index bbf5d5074dc119195d7988829af1f00060eea9f6..9d9235c39d6973a0d9e27d4031fb584e86bb3d0f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_util.go @@ -59,7 +59,7 @@ func encodeBase64Bytes(bytes []byte) []byte { return encoded } -func encodeBase64Value(value model.LabelValue) []byte { +func encodeBase64Value(value string) []byte { encodedLen := base64.RawStdEncoding.EncodedLen(len(value)) encoded := make([]byte, encodedLen, encodedLen) base64.RawStdEncoding.Encode(encoded, []byte(value)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index fc12849f9223c2e16f55c697e31988c8335b7f82..bfd9288d82e76049665e5aab78a4772210ef1096 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -16,7 +16,6 @@ import ( 
"github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -154,7 +153,7 @@ func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time } // Validate the query is within reasonable bounds. - metricName, matchers, shortcut, err := c.validateQuery(ctx, from, &through, allMatchers) + metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers) if err != nil { return nil, nil, err } else if shortcut { @@ -277,13 +276,13 @@ func (c *seriesStore) lookupSeriesByMetricNameMatcher(ctx context.Context, from, var queries []IndexQuery var labelName string if matcher == nil { - queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, model.LabelValue(metricName)) + queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, metricName) } else if matcher.Type != labels.MatchEqual { labelName = matcher.Name - queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, model.LabelValue(metricName), model.LabelName(matcher.Name)) + queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name) } else { labelName = matcher.Name - queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, model.LabelValue(metricName), model.LabelName(matcher.Name), model.LabelValue(matcher.Value)) + queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value) } if err != nil { return nil, err @@ -384,11 +383,10 @@ func (c *seriesStore) calculateIndexEntries(from, through model.Time, chunk Chun entries := []IndexEntry{} keysToCache := []string{} - metricName, err := extract.MetricNameFromMetric(chunk.Metric) - if err != nil { - return nil, nil, err + metricName := chunk.Metric.Get(labels.MetricName) + if metricName == "" { + return nil, nil, fmt.Errorf("no MetricNameLabel for chunk") } - keys := c.schema.GetLabelEntryCacheKeys(from, through, chunk.UserID, chunk.Metric) cacheKeys := make([]string, 0, len(keys)) // Keys which translate to the strings stored in the cache. diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go index 761e22f4b80a21169f020e90c3d5fd6fb6916ef2..96a81e59cbf0c57f3723274fe37b9eeeb405b5b2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto +// source: caching_index_client.proto package storage @@ -32,7 +32,7 @@ type Entry struct { func (m *Entry) Reset() { *m = Entry{} } func (*Entry) ProtoMessage() {} func (*Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_a60039d4a2d816f6, []int{0} + return fileDescriptor_6a83955bbc783296, []int{0} } func (m *Entry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ type ReadBatch struct { func (m *ReadBatch) Reset() { *m = ReadBatch{} } func (*ReadBatch) ProtoMessage() {} func (*ReadBatch) Descriptor() ([]byte, []int) { - return fileDescriptor_a60039d4a2d816f6, []int{1} + return fileDescriptor_6a83955bbc783296, []int{1} } func (m *ReadBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,33 +136,30 @@ func init() { proto.RegisterType((*ReadBatch)(nil), "storage.ReadBatch") } -func init() { - proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_a60039d4a2d816f6) -} +func init() { proto.RegisterFile("caching_index_client.proto", fileDescriptor_6a83955bbc783296) } -var fileDescriptor_a60039d4a2d816f6 = []byte{ - // 335 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, - 0x00, 0x44, 0x63, 0xd2, 0xa4, 0xaa, 0x0b, 0x08, 0x65, 0x40, 0x11, 0x83, 0x1b, 0x15, 0x21, 0x65, - 0x21, 0x91, 0x80, 0x2f, 0x08, 0x62, 0x63, 0x0a, 0x12, 0x6b, 0xe5, 0xba, 0x26, 0x31, 0x4d, 0xed, - 0xc8, 0x75, 0x50, 0xb3, 0xb1, 0xb1, 0xf2, 0x19, 0x7c, 0x4a, 0xc7, 0x8e, 0x15, 0x43, 0x45, 0xdd, - 0x85, 0xb1, 0x9f, 0x80, 0x6a, 0x82, 0xd4, 0x81, 0xed, 0x9e, 0xef, 0x7c, 0x67, 0x19, 0xde, 0x67, - 0x4c, 0xe5, 0xd5, 0x30, 0x22, 0x62, 0x12, 0x13, 0x21, 0x15, 0x9d, 0x95, 0x52, 0x3c, 0x53, 0xa2, - 0x1a, 0x8a, 0xcb, 0x71, 0x16, 0x93, 0xbc, 0xe2, 0xe3, 0x78, 0xaa, 0x84, 0xc4, 0x19, 0x8d, 0x09, - 0x26, 0x39, 0xe3, 0xd9, 0x80, 0xf1, 0x11, 0x9d, 0x0d, 0x48, 0xc1, 0x28, 0x57, 0x51, 0x29, 0x85, - 0x12, 0x5e, 0xbb, 0xc9, 0x9c, 0x5d, 0xee, 0xd5, 0x66, 0x22, 0x13, 0xb1, 0xf1, 0x87, 0xd5, 0x93, - 0x21, 0x03, 0x46, 0xfd, 0xde, 0xeb, 0x3f, 0x40, 0xe7, 0x8e, 0x2b, 0x59, 0x7b, 0x17, 0xd0, 0xbd, - 0x15, 0x45, 0x35, 0xe1, 0x3e, 0x08, 0x40, 0x78, 0x98, 0x1c, 0xcd, 0x57, 0x3d, 0xeb, 0x73, 0xd5, - 0x73, 0x92, 0x5a, 0xd1, 0x69, 0xea, 0x12, 0x63, 0x7a, 0xe7, 0xd0, 0x79, 0xc4, 0x45, 0x45, 0xfd, - 0x83, 0xff, 0x52, 0xce, 0xcb, 0xce, 0xeb, 0xbf, 0x01, 0xd8, 0x49, 0x29, 0x1e, 0x25, 0x58, 0x91, - 0xdc, 0x8b, 0x60, 0x9b, 0x72, 0x25, 0x19, 0x9d, 0xfa, 0x20, 0xb0, 0xc3, 0xee, 0xd5, 0x71, 0xd4, - 0x3c, 0x36, 0x32, 0xd3, 0x49, 0x6b, 0x57, 0x92, 0xfe, 0x85, 0xbc, 0x13, 0x68, 0x8f, 0x69, 0x6d, - 0x06, 0x3a, 0xe9, 0x4e, 0x7a, 0xa7, 0xd0, 0xa5, 0xb3, 0x92, 0xc9, 0xda, 0xb7, 0x03, 0x10, 0xda, - 0x69, 0x43, 0x5e, 0x00, 0xbb, 0x04, 0xcb, 0x11, 0xe3, 0xb8, 0x60, 0xaa, 0xf6, 0x5b, 0x01, 0x08, - 0x9d, 0x74, 0xff, 0x28, 0xb9, 0x59, 0xac, 0x91, 0xb5, 0x5c, 0x23, 0x6b, 0xbb, 0x46, 0xe0, 0x55, - 0x23, 0xf0, 0xa1, 0x11, 0x98, 0x6b, 0x04, 0x16, 0x1a, 0x81, 0x2f, 0x8d, 0xc0, 0xb7, 0x46, 0xd6, - 0x56, 0x23, 0xf0, 0xbe, 0x41, 0xd6, 0x62, 0x83, 0xac, 0xe5, 0x06, 0x59, 0x43, 0xd7, 0xfc, 0xcd, - 0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xd2, 0x5d, 0xd9, 0xa3, 0x01, 0x00, 0x00, +var fileDescriptor_6a83955bbc783296 = []byte{ + // 313 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xf3, 0x30, + 0x14, 0x85, 0xed, 0x3f, 0x4d, 0xaa, 
0xba, 0x3f, 0x08, 0x79, 0x40, 0x51, 0x87, 0xdb, 0xa8, 0x08, + 0x29, 0x0b, 0xa9, 0x04, 0x3c, 0x41, 0x10, 0x2f, 0x10, 0x24, 0xd6, 0xca, 0x75, 0x4d, 0x6a, 0x91, + 0xda, 0x55, 0xea, 0xa0, 0x66, 0x63, 0x63, 0xe5, 0x31, 0x78, 0x94, 0x8e, 0x1d, 0x2b, 0x86, 0x8a, + 0xba, 0x0b, 0x63, 0x1f, 0x01, 0xd5, 0x14, 0x89, 0x81, 0xed, 0x1c, 0x7f, 0xc7, 0xf7, 0x5c, 0x5d, + 0xd2, 0xe1, 0x8c, 0x8f, 0xa5, 0xca, 0x07, 0x52, 0x8d, 0xc4, 0x7c, 0xc0, 0x0b, 0x29, 0x94, 0x49, + 0xa6, 0xa5, 0x36, 0x9a, 0x36, 0x67, 0x46, 0x97, 0x2c, 0x17, 0x9d, 0x8b, 0x5c, 0x9a, 0x71, 0x35, + 0x4c, 0xb8, 0x9e, 0xf4, 0x73, 0x9d, 0xeb, 0xbe, 0xe3, 0xc3, 0xea, 0xc1, 0x39, 0x67, 0x9c, 0xfa, + 0xfe, 0xd7, 0xbb, 0x23, 0xfe, 0xad, 0x32, 0x65, 0x4d, 0xcf, 0x49, 0x70, 0xa3, 0x8b, 0x6a, 0xa2, + 0x42, 0x1c, 0xe1, 0xf8, 0x7f, 0x7a, 0xb4, 0x58, 0x77, 0xd1, 0xfb, 0xba, 0xeb, 0xa7, 0xb5, 0x11, + 0xb3, 0x2c, 0xe0, 0x0e, 0xd2, 0x33, 0xe2, 0xdf, 0xb3, 0xa2, 0x12, 0xe1, 0xbf, 0xbf, 0x52, 0xfe, + 0xd3, 0x9e, 0xf5, 0x5e, 0x30, 0x69, 0x65, 0x82, 0x8d, 0x52, 0x66, 0xf8, 0x98, 0x26, 0xa4, 0x29, + 0x94, 0x29, 0xa5, 0x98, 0x85, 0x38, 0xf2, 0xe2, 0xf6, 0xe5, 0x71, 0x72, 0x58, 0x36, 0x71, 0xd5, + 0x69, 0x63, 0x3f, 0x24, 0xfb, 0x09, 0xd1, 0x13, 0xe2, 0x3d, 0x8a, 0xda, 0x15, 0xb4, 0xb2, 0xbd, + 0xa4, 0xa7, 0x24, 0x10, 0xf3, 0xa9, 0x2c, 0xeb, 0xd0, 0x8b, 0x70, 0xec, 0x65, 0x07, 0x47, 0x23, + 0xd2, 0xe6, 0xac, 0x1c, 0x49, 0xc5, 0x0a, 0x69, 0xea, 0xb0, 0x11, 0xe1, 0xd8, 0xcf, 0x7e, 0x3f, + 0xa5, 0xd7, 0xcb, 0x0d, 0xa0, 0xd5, 0x06, 0xd0, 0x6e, 0x03, 0xf8, 0xd9, 0x02, 0x7e, 0xb3, 0x80, + 0x17, 0x16, 0xf0, 0xd2, 0x02, 0xfe, 0xb0, 0x80, 0x3f, 0x2d, 0xa0, 0x9d, 0x05, 0xfc, 0xba, 0x05, + 0xb4, 0xdc, 0x02, 0x5a, 0x6d, 0x01, 0x0d, 0x03, 0x77, 0x9b, 0xab, 0xaf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xce, 0x7e, 0x67, 0x82, 0x71, 0x01, 0x00, 0x00, } func (this *Entry) Equal(that interface{}) bool { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 30d3cd17648106003bbb758cc3fa124641369ef4..7502dc92c714b3e2cbde8a025656e1c7bb2c2abb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -115,7 +115,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun case "boltdb": return local.NewBoltDBIndexClient(cfg.BoltDBConfig) default: - return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: aws, gcp, cassandra, inmemory", name) + return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: aws, cassandra, inmemory, gcp, bigtable, bigtable-hashed", name) } } @@ -138,7 +138,7 @@ func NewObjectClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chu return aws.NewDynamoDBObjectClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) case "gcp": return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) - case "gcp-columnkey", "bigtable": + case "gcp-columnkey", "bigtable", "bigtable-hashed": return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcs": return gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig, schemaCfg) @@ -147,7 +147,7 @@ func NewObjectClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chu case "filesystem": return local.NewFSObjectClient(cfg.FSConfig) default: - return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: aws, gcp, cassandra, inmemory", name) + return nil, fmt.Errorf("Unrecognized storage client 
%v, choose one of: aws, cassandra, inmemory, gcp, bigtable, bigtable-hashed", name) } } @@ -167,8 +167,17 @@ func NewTableClient(name string, cfg Config) (chunk.TableClient, error) { case "cassandra": return cassandra.NewTableClient(context.Background(), cfg.CassandraStorageConfig) case "boltdb": - return local.NewTableClient() + return local.NewTableClient(cfg.BoltDBConfig.Directory) default: - return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: aws, gcp, inmemory", name) + return nil, fmt.Errorf("Unrecognized storage client %v, choose one of: aws, cassandra, inmemory, gcp, bigtable, bigtable-hashed", name) } } + +// NewBucketClient makes a new bucket client based on the configuration. +func NewBucketClient(storageConfig Config) (chunk.BucketClient, error) { + if storageConfig.FSConfig.Directory != "" { + return local.NewFSObjectClient(storageConfig.FSConfig) + } + + return nil, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index 9bbe325da24d7e57e69f9b20c6d96daf5a28ff58..5404978be5ce73b8ffdea8a1c9309bb4a13631b7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -21,6 +21,8 @@ import ( const ( readLabel = "read" writeLabel = "write" + + bucketRetentionEnforcementInterval = 12 * time.Hour ) var ( @@ -45,39 +47,39 @@ func init() { // TableManagerConfig holds config for a TableManager type TableManagerConfig struct { // Master 'off-switch' for table capacity updates, e.g. when troubleshooting - ThroughputUpdatesDisabled bool + ThroughputUpdatesDisabled bool `yaml:"throughput_updates_disabled"` // Master 'on-switch' for table retention deletions - RetentionDeletesEnabled bool + RetentionDeletesEnabled bool `yaml:"retention_deletes_enabled"` // How far back tables will be kept before they are deleted - RetentionPeriod time.Duration + RetentionPeriod time.Duration `yaml:"retention_period"` // Period with which the table manager will poll for tables. - DynamoDBPollInterval time.Duration + DynamoDBPollInterval time.Duration `yaml:"dynamodb_poll_interval"` // duration a table will be created before it is needed. 
- CreationGracePeriod time.Duration + CreationGracePeriod time.Duration `yaml:"creation_grace_period"` - IndexTables ProvisionConfig - ChunkTables ProvisionConfig + IndexTables ProvisionConfig `yaml:"index_tables_provisioning"` + ChunkTables ProvisionConfig `yaml:"chunk_tables_provisioning"` } // ProvisionConfig holds config for provisioning capacity (on DynamoDB) type ProvisionConfig struct { - ProvisionedThroughputOnDemandMode bool - ProvisionedWriteThroughput int64 - ProvisionedReadThroughput int64 - InactiveThroughputOnDemandMode bool - InactiveWriteThroughput int64 - InactiveReadThroughput int64 - - WriteScale AutoScalingConfig - InactiveWriteScale AutoScalingConfig - InactiveWriteScaleLastN int64 - ReadScale AutoScalingConfig - InactiveReadScale AutoScalingConfig - InactiveReadScaleLastN int64 + ProvisionedThroughputOnDemandMode bool `yaml:"provisioned_throughput_on_demand_mode"` + ProvisionedWriteThroughput int64 `yaml:"provisioned_write_throughput"` + ProvisionedReadThroughput int64 `yaml:"provisioned_read_throughput"` + InactiveThroughputOnDemandMode bool `yaml:"inactive_throughput_on_demand_mode"` + InactiveWriteThroughput int64 `yaml:"inactive_write_throughput"` + InactiveReadThroughput int64 `yaml:"inactive_read_throughput"` + + WriteScale AutoScalingConfig `yaml:"write_scale"` + InactiveWriteScale AutoScalingConfig `yaml:"inactive_write_scale"` + InactiveWriteScaleLastN int64 `yaml:"inactive_write_scale_lastn"` + ReadScale AutoScalingConfig `yaml:"read_scale"` + InactiveReadScale AutoScalingConfig `yaml:"inactive_read_scale"` + InactiveReadScaleLastN int64 `yaml:"inactive_read_scale_lastn"` } // RegisterFlags adds the flags required to config this to the given FlagSet. @@ -112,22 +114,25 @@ func (cfg *ProvisionConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) { // TableManager creates and manages the provisioned throughput on DynamoDB tables type TableManager struct { - client TableClient - cfg TableManagerConfig - schemaCfg SchemaConfig - maxChunkAge time.Duration - done chan struct{} - wait sync.WaitGroup + client TableClient + cfg TableManagerConfig + schemaCfg SchemaConfig + maxChunkAge time.Duration + done chan struct{} + wait sync.WaitGroup + bucketClient BucketClient } // NewTableManager makes a new TableManager -func NewTableManager(cfg TableManagerConfig, schemaCfg SchemaConfig, maxChunkAge time.Duration, tableClient TableClient) (*TableManager, error) { +func NewTableManager(cfg TableManagerConfig, schemaCfg SchemaConfig, maxChunkAge time.Duration, tableClient TableClient, + objectClient BucketClient) (*TableManager, error) { return &TableManager{ - cfg: cfg, - schemaCfg: schemaCfg, - maxChunkAge: maxChunkAge, - client: tableClient, - done: make(chan struct{}), + cfg: cfg, + schemaCfg: schemaCfg, + maxChunkAge: maxChunkAge, + client: tableClient, + done: make(chan struct{}), + bucketClient: objectClient, }, nil } @@ -135,6 +140,11 @@ func NewTableManager(cfg TableManagerConfig, schemaCfg SchemaConfig, maxChunkAge func (m *TableManager) Start() { m.wait.Add(1) go m.loop() + + if m.bucketClient != nil && m.cfg.RetentionPeriod != 0 && m.cfg.RetentionDeletesEnabled == true { + m.wait.Add(1) + go m.bucketRetentionLoop() + } } // Stop the TableManager @@ -169,6 +179,26 @@ func (m *TableManager) loop() { } } +func (m *TableManager) bucketRetentionLoop() { + defer m.wait.Done() + + ticker := time.NewTicker(bucketRetentionEnforcementInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := m.bucketClient.DeleteChunksBefore(context.Background(), 
mtime.Now().Add(-m.cfg.RetentionPeriod)) + + if err != nil { + level.Error(util.Logger).Log("msg", "error enforcing filesystem retention", "err", err) + } + case <-m.done: + return + } + } +} + // SyncTables will calculate the tables expected to exist, create those that do // not and update those that need it. It is exposed for testing. func (m *TableManager) SyncTables(ctx context.Context) error { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go index d6cfb69f0e22885e80cfc3e36836571e0038bf94..0a5f1cf9dbecbdbacb9223fdb51a70a90ba31689 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go @@ -8,8 +8,10 @@ import ( promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/ingester/client" ) const ( @@ -38,7 +40,7 @@ func Setup(fixture Fixture, tableName string) (chunk.IndexClient, chunk.ObjectCl return nil, nil, err } - tableManager, err := chunk.NewTableManager(tbmConfig, schemaConfig, 12*time.Hour, tableClient) + tableManager, err := chunk.NewTableManager(tbmConfig, schemaConfig, 12*time.Hour, tableClient, nil) if err != nil { return nil, nil, err } @@ -59,9 +61,9 @@ func CreateChunks(startIndex, batchSize int, start model.Time) ([]string, []chun keys := []string{} chunks := []chunk.Chunk{} for j := 0; j < batchSize; j++ { - chunk := dummyChunkFor(start, model.Metric{ - model.MetricNameLabel: "foo", - "index": model.LabelValue(strconv.Itoa(startIndex*batchSize + j)), + chunk := dummyChunkFor(start, labels.Labels{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "index", Value: strconv.Itoa(startIndex*batchSize + j)}, }) chunks = append(chunks, chunk) keys = append(keys, chunk.ExternalKey()) @@ -70,18 +72,18 @@ func CreateChunks(startIndex, batchSize int, start model.Time) ([]string, []chun } func dummyChunk(now model.Time) chunk.Chunk { - return dummyChunkFor(now, model.Metric{ - model.MetricNameLabel: "foo", - "bar": "baz", - "toms": "code", + return dummyChunkFor(now, labels.Labels{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "bar", Value: "baz"}, + {Name: "toms", Value: "code"}, }) } -func dummyChunkFor(now model.Time, metric model.Metric) chunk.Chunk { +func dummyChunkFor(now model.Time, metric labels.Labels) chunk.Chunk { cs, _ := promchunk.New().Add(model.SamplePair{Timestamp: now, Value: 0}) chunk := chunk.NewChunk( userID, - metric.Fingerprint(), + client.Fingerprint(metric), metric, cs[0], now.Add(-time.Hour), diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go index f95d1e27877b1e8a78dfa8f05a186a56e7eb85be..dfd2531717c16892c69dc150c948570ced5c2bb1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go @@ -12,6 +12,8 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/util" ) var json = jsoniter.ConfigCompatibleWithStandardLibrary @@ -209,11 +211,7 @@ func FromLabelsToLabelAdapaters(ls labels.Labels) 
[]LabelAdapter { // FromLabelAdaptersToMetric converts []LabelAdapter to a model.Metric. // Don't do this on any performance sensitive paths. func FromLabelAdaptersToMetric(ls []LabelAdapter) model.Metric { - result := make(model.Metric, len(ls)) - for _, l := range ls { - result[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } - return result + return util.LabelsToMetric(FromLabelAdaptersToLabels(ls)) } // FromMetricsToLabelAdapters converts model.Metric to []LabelAdapter. @@ -254,6 +252,18 @@ func FastFingerprint(ls []LabelAdapter) model.Fingerprint { return model.Fingerprint(result) } +// Fingerprint runs the same algorithm as Prometheus labelSetToFingerprint() +func Fingerprint(labels labels.Labels) model.Fingerprint { + sum := hashNew() + for _, label := range labels { + sum = hashAddString(sum, label.Name) + sum = hashAddByte(sum, model.SeparatorByte) + sum = hashAddString(sum, label.Value) + sum = hashAddByte(sum, model.SeparatorByte) + } + return model.Fingerprint(sum) +} + // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { t, err := json.Marshal(model.Time(s.TimestampMs)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go index b7a9689fb025bd1ccb9682048d042f99ee82d812..5b9106905f5ae7ed14f0f518bb03ec6667976a21 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto +// source: cortex.proto package client @@ -53,7 +53,7 @@ var MatchType_value = map[string]int32{ } func (MatchType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{0} + return fileDescriptor_893a47d0a749d749, []int{0} } type WriteRequest_SourceEnum int32 @@ -74,7 +74,7 @@ var WriteRequest_SourceEnum_value = map[string]int32{ } func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{0, 0} + return fileDescriptor_893a47d0a749d749, []int{0, 0} } type WriteRequest struct { @@ -85,7 +85,7 @@ type WriteRequest struct { func (m *WriteRequest) Reset() { *m = WriteRequest{} } func (*WriteRequest) ProtoMessage() {} func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{0} + return fileDescriptor_893a47d0a749d749, []int{0} } func (m *WriteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -127,7 +127,7 @@ type WriteResponse struct { func (m *WriteResponse) Reset() { *m = WriteResponse{} } func (*WriteResponse) ProtoMessage() {} func (*WriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{1} + return fileDescriptor_893a47d0a749d749, []int{1} } func (m *WriteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ type ReadRequest struct { func (m *ReadRequest) Reset() { *m = ReadRequest{} } func (*ReadRequest) ProtoMessage() {} func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{2} + return fileDescriptor_893a47d0a749d749, []int{2} } func (m *ReadRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -206,7 +206,7 @@ type ReadResponse struct { func (m *ReadResponse) Reset() { *m = ReadResponse{} } func (*ReadResponse) ProtoMessage() {} func (*ReadResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{3} + return fileDescriptor_893a47d0a749d749, []int{3} } func (m *ReadResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -251,7 +251,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (*QueryRequest) ProtoMessage() {} func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{4} + return fileDescriptor_893a47d0a749d749, []int{4} } func (m *QueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -308,7 +308,7 @@ type QueryResponse struct { func (m *QueryResponse) Reset() { *m = QueryResponse{} } func (*QueryResponse) ProtoMessage() {} func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{5} + return fileDescriptor_893a47d0a749d749, []int{5} } func (m *QueryResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -352,7 +352,7 @@ type QueryStreamResponse struct { func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } func (*QueryStreamResponse) ProtoMessage() {} func (*QueryStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{6} + return fileDescriptor_893a47d0a749d749, []int{6} } func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -395,7 +395,7 @@ type LabelValuesRequest struct { func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } func (*LabelValuesRequest) ProtoMessage() {} func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{7} + return fileDescriptor_893a47d0a749d749, []int{7} } func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ type LabelValuesResponse struct { func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } func (*LabelValuesResponse) ProtoMessage() {} func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{8} + return fileDescriptor_893a47d0a749d749, []int{8} } func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -480,7 +480,7 @@ type LabelNamesRequest struct { func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } func (*LabelNamesRequest) ProtoMessage() {} func (*LabelNamesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{9} + return fileDescriptor_893a47d0a749d749, []int{9} } func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -516,7 +516,7 @@ type LabelNamesResponse struct { func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } func (*LabelNamesResponse) ProtoMessage() {} func (*LabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{10} + return fileDescriptor_893a47d0a749d749, []int{10} } func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -558,7 +558,7 @@ type UserStatsRequest struct { func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } func (*UserStatsRequest) ProtoMessage() {} func (*UserStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{11} + return fileDescriptor_893a47d0a749d749, []int{11} } func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -597,7 +597,7 @@ type UserStatsResponse struct { func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } func 
(*UserStatsResponse) ProtoMessage() {} func (*UserStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{12} + return fileDescriptor_893a47d0a749d749, []int{12} } func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ type UserIDStatsResponse struct { func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } func (*UserIDStatsResponse) ProtoMessage() {} func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{13} + return fileDescriptor_893a47d0a749d749, []int{13} } func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -712,7 +712,7 @@ type UsersStatsResponse struct { func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } func (*UsersStatsResponse) ProtoMessage() {} func (*UsersStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{14} + return fileDescriptor_893a47d0a749d749, []int{14} } func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -757,7 +757,7 @@ type MetricsForLabelMatchersRequest struct { func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } func (*MetricsForLabelMatchersRequest) ProtoMessage() {} func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{15} + return fileDescriptor_893a47d0a749d749, []int{15} } func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -814,7 +814,7 @@ type MetricsForLabelMatchersResponse struct { func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} } func (*MetricsForLabelMatchersResponse) ProtoMessage() {} func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{16} + return fileDescriptor_893a47d0a749d749, []int{16} } func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -860,7 +860,7 @@ type TimeSeriesChunk struct { func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } func (*TimeSeriesChunk) ProtoMessage() {} func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{17} + return fileDescriptor_893a47d0a749d749, []int{17} } func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +920,7 @@ type Chunk struct { func (m *Chunk) Reset() { *m = Chunk{} } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{18} + return fileDescriptor_893a47d0a749d749, []int{18} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -983,7 +983,7 @@ type TransferChunksResponse struct { func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } func (*TransferChunksResponse) ProtoMessage() {} func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{19} + return fileDescriptor_893a47d0a749d749, []int{19} } func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1021,7 +1021,7 @@ type TimeSeries struct { func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{20} + return fileDescriptor_893a47d0a749d749, []int{20} } func (m 
*TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1065,7 +1065,7 @@ type LabelPair struct { func (m *LabelPair) Reset() { *m = LabelPair{} } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{21} + return fileDescriptor_893a47d0a749d749, []int{21} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1116,7 @@ type Sample struct { func (m *Sample) Reset() { *m = Sample{} } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{22} + return fileDescriptor_893a47d0a749d749, []int{22} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1166,7 +1166,7 @@ type LabelMatchers struct { func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } func (*LabelMatchers) ProtoMessage() {} func (*LabelMatchers) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{23} + return fileDescriptor_893a47d0a749d749, []int{23} } func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1209,7 +1209,7 @@ type Metric struct { func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{24} + return fileDescriptor_893a47d0a749d749, []int{24} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1247,7 +1247,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_db0f8a1e534b119a, []int{25} + return fileDescriptor_893a47d0a749d749, []int{25} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1328,89 +1328,86 @@ func init() { proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") } -func init() { - proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_db0f8a1e534b119a) -} - -var fileDescriptor_db0f8a1e534b119a = []byte{ - // 1231 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xdf, 0x8d, 0x7f, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0x92, 0x7e, 0x9b, 0xba, 0xfa, 0x6e, 0xca, - 0x48, 0x2d, 0x11, 0x50, 0xa7, 0xa4, 0x2a, 0xf4, 0x40, 0x55, 0x9c, 0x36, 0x6d, 0x8d, 0x92, 0x34, - 0x1d, 0xbb, 0x80, 0x90, 0xd0, 0x6a, 0x63, 0x4f, 0x9d, 0xa5, 0xfb, 0xc3, 0x9d, 0x99, 0x45, 0xf4, - 0x80, 0xc4, 0x7f, 0x00, 0x47, 0xf8, 0x0f, 0x38, 0x73, 0x81, 0x33, 0xa7, 0x1e, 0x7b, 0xac, 0x38, - 0x54, 0xd4, 0xbd, 0x70, 0xec, 0x9f, 0x80, 0x76, 0x66, 0x76, 0xbd, 0xeb, 0xda, 0xa2, 0x02, 0xf5, - 0xe6, 0x79, 0xef, 0xf3, 0x3e, 0xf3, 0xe6, 0xfd, 0x5c, 0xc3, 0xc7, 0x43, 0x57, 0x1c, 0x47, 0x47, - 0xad, 0x7e, 0xe8, 0x6f, 0xf5, 0x43, 0x26, 0xe8, 0x37, 0x23, 0x16, 0x7e, 0x45, 0xfb, 0x42, 0x9f, - 0xb6, 0x46, 0x0f, 0x86, 0x5b, 0x6e, 0x30, 0xa4, 0x5c, 0x50, 0xb6, 0xd5, 0xf7, 0x5c, 0x1a, 0x24, - 0xaa, 0xd6, 0x88, 0x85, 0x22, 0x44, 0x65, 0x75, 0x6a, 0x5e, 0xc8, 0x30, 0x0d, 0xc3, 0x61, 0xb8, - 0x25, 0xd5, 0x47, 0xd1, 0x7d, 0x79, 0x92, 0x07, 0xf9, 0x4b, 0x99, 0xe1, 0xdf, 0x4c, 0xa8, 0x7d, - 0xc6, 0x5c, 0x41, 0x09, 0x7d, 0x18, 0x51, 0x2e, 0xd0, 0x01, 0x80, 0x70, 0x7d, 0xca, 0x29, 0x73, - 0x29, 0x5f, 0x37, 0xcf, 0x16, 0x36, 0xab, 0xdb, 0xa8, 0xa5, 0xaf, 0xea, 0xb9, 0x3e, 0xed, 0x4a, - 0xcd, 0x4e, 0xf3, 0xf1, 0xb3, 0x0d, 0xe3, 0x8f, 0x67, 0x1b, 
0xe8, 0x90, 0x51, 0xc7, 0xf3, 0xc2, - 0x7e, 0x2f, 0xb5, 0x22, 0x19, 0x06, 0xf4, 0x21, 0x94, 0xbb, 0x61, 0xc4, 0xfa, 0x74, 0x7d, 0xe1, - 0xac, 0xb9, 0x59, 0xdf, 0xde, 0x48, 0xb8, 0xb2, 0xb7, 0xb6, 0x14, 0x64, 0x37, 0x88, 0x7c, 0x52, - 0xe6, 0xf2, 0x37, 0xde, 0x00, 0x98, 0x48, 0xd1, 0x22, 0x14, 0xda, 0x87, 0x9d, 0x86, 0x81, 0x96, - 0xa0, 0x48, 0xee, 0xed, 0xed, 0x36, 0x4c, 0x7c, 0x02, 0x96, 0x35, 0x07, 0x1f, 0x85, 0x01, 0xa7, - 0xf8, 0x2a, 0x54, 0x09, 0x75, 0x06, 0xc9, 0x4b, 0x5a, 0xb0, 0xf8, 0x30, 0xca, 0x3e, 0x63, 0x2d, - 0xb9, 0xfa, 0x6e, 0x44, 0xd9, 0x23, 0x0d, 0x23, 0x09, 0x08, 0x5f, 0x83, 0x9a, 0x32, 0x57, 0x74, - 0x68, 0x0b, 0x16, 0x19, 0xe5, 0x91, 0x27, 0x12, 0xfb, 0x93, 0x53, 0xf6, 0x0a, 0x47, 0x12, 0x14, - 0xfe, 0xd1, 0x84, 0x5a, 0x96, 0x1a, 0xbd, 0x07, 0x88, 0x0b, 0x87, 0x09, 0x5b, 0xc6, 0x43, 0x38, - 0xfe, 0xc8, 0xf6, 0x63, 0x32, 0x73, 0xb3, 0x40, 0x1a, 0x52, 0xd3, 0x4b, 0x14, 0xfb, 0x1c, 0x6d, - 0x42, 0x83, 0x06, 0x83, 0x3c, 0x76, 0x41, 0x62, 0xeb, 0x34, 0x18, 0x64, 0x91, 0x17, 0x61, 0xc9, - 0x77, 0x44, 0xff, 0x98, 0x32, 0xbe, 0x5e, 0xc8, 0x3f, 0x6d, 0xcf, 0x39, 0xa2, 0xde, 0xbe, 0x52, - 0x92, 0x14, 0x85, 0x3b, 0xb0, 0x9c, 0x73, 0x1a, 0x5d, 0x79, 0xcd, 0x34, 0x17, 0xe3, 0x34, 0x67, - 0x13, 0x8a, 0x7b, 0xb0, 0x2a, 0xa9, 0xba, 0x82, 0x51, 0xc7, 0x4f, 0x09, 0xaf, 0xce, 0x20, 0x3c, - 0xf5, 0x2a, 0xe1, 0xf5, 0xe3, 0x28, 0x78, 0x30, 0x83, 0xf5, 0x12, 0x20, 0xe9, 0xfa, 0xa7, 0x8e, - 0x17, 0x51, 0x9e, 0x04, 0xf0, 0xff, 0x00, 0x5e, 0x2c, 0xb5, 0x03, 0xc7, 0xa7, 0x32, 0x70, 0x15, - 0x52, 0x91, 0x92, 0x03, 0xc7, 0xa7, 0xf8, 0x0a, 0xac, 0xe6, 0x8c, 0xb4, 0x2b, 0x6f, 0x41, 0x4d, - 0x59, 0x7d, 0x2d, 0xe5, 0xd2, 0x99, 0x0a, 0xa9, 0x7a, 0x13, 0x28, 0x5e, 0x85, 0x95, 0xbd, 0x84, - 0x26, 0xb9, 0x0d, 0x5f, 0xd6, 0x3e, 0x68, 0xa1, 0x66, 0xdb, 0x80, 0xea, 0xc4, 0x87, 0x84, 0x0c, - 0x52, 0x27, 0x38, 0x46, 0xd0, 0xb8, 0xc7, 0x29, 0xeb, 0x0a, 0x47, 0xa4, 0x54, 0xbf, 0x9a, 0xb0, - 0x92, 0x11, 0x6a, 0xaa, 0x73, 0x50, 0x57, 0x3d, 0xec, 0x86, 0x81, 0xcd, 0x1c, 0xa1, 0x9e, 0x64, - 0x92, 0xe5, 0x54, 0x4a, 0x1c, 0x41, 0xe3, 0x57, 0x07, 0x91, 0x6f, 0xeb, 0x50, 0xc6, 0x25, 0x50, - 0x24, 0x95, 0x20, 0xf2, 0x55, 0x04, 0xe3, 0xaa, 0x72, 0x46, 0xae, 0x3d, 0xc5, 0x54, 0x90, 0x4c, - 0x0d, 0x67, 0xe4, 0x76, 0x72, 0x64, 0x2d, 0x58, 0x65, 0x91, 0x47, 0xa7, 0xe1, 0x45, 0x09, 0x5f, - 0x89, 0x55, 0x39, 0x3c, 0xfe, 0x12, 0x56, 0x63, 0xc7, 0x3b, 0x37, 0xf2, 0xae, 0x9f, 0x82, 0xc5, - 0x88, 0x53, 0x66, 0xbb, 0x03, 0x9d, 0x86, 0x72, 0x7c, 0xec, 0x0c, 0xd0, 0x05, 0x28, 0x0e, 0x1c, - 0xe1, 0x48, 0x37, 0xab, 0xdb, 0xa7, 0x93, 0x8c, 0xbf, 0xf2, 0x78, 0x22, 0x61, 0xf8, 0x16, 0xa0, - 0x58, 0xc5, 0xf3, 0xec, 0xef, 0x43, 0x89, 0xc7, 0x02, 0x5d, 0x37, 0x67, 0xb2, 0x2c, 0x53, 0x9e, - 0x10, 0x85, 0xc4, 0xbf, 0x98, 0x60, 0xed, 0x53, 0xc1, 0xdc, 0x3e, 0xbf, 0x19, 0xb2, 0x6c, 0xd9, - 0xf3, 0x37, 0xdd, 0x7e, 0x57, 0xa0, 0x96, 0x34, 0x96, 0xcd, 0xa9, 0xd0, 0x2d, 0x78, 0x72, 0x56, - 0x0b, 0x72, 0x52, 0x4d, 0xa0, 0x5d, 0x2a, 0x70, 0x07, 0x36, 0xe6, 0xfa, 0xac, 0x43, 0x71, 0x1e, - 0xca, 0xbe, 0x84, 0xe8, 0x58, 0xd4, 0x13, 0x5a, 0x65, 0x48, 0xb4, 0x16, 0xff, 0x6e, 0xc2, 0x89, - 0xa9, 0xb6, 0x8a, 0x9f, 0x70, 0x9f, 0x85, 0xbe, 0x9d, 0x2c, 0x8a, 0x49, 0xb6, 0xea, 0xb1, 0xbc, - 0xa3, 0xc5, 0x9d, 0x41, 0x36, 0x9d, 0x0b, 0xb9, 0x74, 0x5e, 0x83, 0xb2, 0x2c, 0xed, 0x64, 0xb0, - 0xac, 0xe4, 0x5e, 0x75, 0xe8, 0xb8, 0x6c, 0x67, 0x4d, 0x4f, 0xfe, 0x9a, 0x14, 0xb5, 0x07, 0xce, - 0x48, 0x50, 0x46, 0xb4, 0x19, 0x7a, 0x17, 0xca, 0xfd, 0xd8, 0x19, 0xbe, 0x5e, 0x94, 0x04, 0xcb, - 0x09, 0x41, 0xb6, 0xf3, 0x35, 0x04, 0x7f, 0x6f, 0x42, 0x49, 0xb9, 0xfe, 0xa6, 0x72, 
0xd5, 0x84, - 0x25, 0x1a, 0xf4, 0xc3, 0x81, 0x1b, 0x0c, 0x65, 0x8b, 0x94, 0x48, 0x7a, 0x46, 0x48, 0x97, 0x6e, - 0xdc, 0x0b, 0x35, 0x5d, 0x9f, 0xeb, 0xf0, 0xbf, 0x1e, 0x73, 0x02, 0x7e, 0x9f, 0x32, 0xe9, 0x58, - 0x9a, 0x18, 0xfc, 0x2d, 0xc0, 0x24, 0xde, 0x99, 0x38, 0x99, 0xff, 0x2e, 0x4e, 0x2d, 0x58, 0xe4, - 0x8e, 0x3f, 0xf2, 0x64, 0x87, 0xe7, 0x12, 0xdd, 0x95, 0x62, 0x1d, 0xa9, 0x04, 0x84, 0x2f, 0x43, - 0x25, 0xa5, 0x8e, 0x3d, 0x4f, 0x27, 0x62, 0x8d, 0xc8, 0xdf, 0x68, 0x0d, 0x4a, 0x72, 0xde, 0xc9, - 0x40, 0xd4, 0x88, 0x3a, 0xe0, 0x36, 0x94, 0x15, 0xdf, 0x44, 0xaf, 0x66, 0x8e, 0x3a, 0xc4, 0xb3, - 0x72, 0x46, 0x14, 0xab, 0x62, 0x12, 0x42, 0xdc, 0x86, 0xe5, 0x5c, 0xa9, 0xe6, 0xd6, 0x8f, 0xf9, - 0x9a, 0xeb, 0xa7, 0xac, 0xca, 0xf7, 0x3f, 0xc7, 0x0d, 0xdb, 0x50, 0xcb, 0x5e, 0x82, 0xce, 0x41, - 0x51, 0x3c, 0x1a, 0xa9, 0x57, 0xd5, 0x27, 0x74, 0x52, 0xdd, 0x7b, 0x34, 0xa2, 0x44, 0xaa, 0xd3, - 0x88, 0xa9, 0x6a, 0x9f, 0x8a, 0x58, 0x41, 0x0a, 0xd5, 0xe1, 0x9d, 0x4f, 0xa0, 0x92, 0x1a, 0xa3, - 0x0a, 0x94, 0x76, 0xef, 0xde, 0x6b, 0xef, 0x35, 0x0c, 0xb4, 0x0c, 0x95, 0x83, 0x3b, 0x3d, 0x5b, - 0x1d, 0x4d, 0x74, 0x02, 0xaa, 0x64, 0xf7, 0xd6, 0xee, 0xe7, 0xf6, 0x7e, 0xbb, 0x77, 0xfd, 0x76, - 0x63, 0x01, 0x21, 0xa8, 0x2b, 0xc1, 0xc1, 0x1d, 0x2d, 0x2b, 0x6c, 0xff, 0x54, 0x82, 0xa5, 0xa4, - 0xeb, 0xd0, 0x65, 0x28, 0x1e, 0x46, 0xfc, 0x18, 0xad, 0xcd, 0xfa, 0x02, 0x6a, 0x9e, 0x9c, 0x92, - 0xea, 0xaa, 0x33, 0xd0, 0x07, 0x50, 0x92, 0xfb, 0x16, 0xcd, 0xfc, 0x7c, 0x69, 0xce, 0xfe, 0x28, - 0xc1, 0x06, 0xba, 0x01, 0xd5, 0xcc, 0x9e, 0x9e, 0x63, 0x7d, 0x26, 0x27, 0xcd, 0xaf, 0x74, 0x6c, - 0x5c, 0x34, 0xd1, 0x6d, 0xa8, 0x66, 0x56, 0x2c, 0x6a, 0xe6, 0xd2, 0x95, 0x5b, 0xd6, 0x13, 0xae, - 0x19, 0x3b, 0x19, 0x1b, 0x68, 0x17, 0x60, 0xb2, 0x5d, 0xd1, 0xe9, 0x1c, 0x38, 0xbb, 0x86, 0x9b, - 0xcd, 0x59, 0xaa, 0x94, 0x66, 0x07, 0x2a, 0xe9, 0x6e, 0x41, 0xeb, 0x33, 0xd6, 0x8d, 0x22, 0x99, - 0xbf, 0x88, 0xb0, 0x81, 0x6e, 0x42, 0xad, 0xed, 0x79, 0xaf, 0x43, 0xd3, 0xcc, 0x6a, 0xf8, 0x34, - 0x8f, 0x07, 0xa7, 0xe6, 0x8c, 0x73, 0x74, 0x3e, 0x3f, 0xb6, 0xe7, 0xed, 0xa8, 0xe6, 0xdb, 0xff, - 0x88, 0x4b, 0x6f, 0xdb, 0x87, 0x7a, 0x7e, 0x34, 0xa1, 0x79, 0xdf, 0x57, 0x4d, 0x2b, 0x55, 0xcc, - 0x9e, 0x65, 0xc6, 0xa6, 0xb9, 0xf3, 0xd1, 0x93, 0xe7, 0x96, 0xf1, 0xf4, 0xb9, 0x65, 0xbc, 0x7c, - 0x6e, 0x99, 0xdf, 0x8d, 0x2d, 0xf3, 0xe7, 0xb1, 0x65, 0x3e, 0x1e, 0x5b, 0xe6, 0x93, 0xb1, 0x65, - 0xfe, 0x39, 0xb6, 0xcc, 0xbf, 0xc6, 0x96, 0xf1, 0x72, 0x6c, 0x99, 0x3f, 0xbc, 0xb0, 0x8c, 0x27, - 0x2f, 0x2c, 0xe3, 0xe9, 0x0b, 0xcb, 0xf8, 0xa2, 0xac, 0xfe, 0x7b, 0x1c, 0x95, 0xe5, 0xdf, 0x87, - 0x4b, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x95, 0x27, 0x3b, 0x4e, 0xb9, 0x0c, 0x00, 0x00, +func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } + +var fileDescriptor_893a47d0a749d749 = []byte{ + // 1205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xdf, 0x8d, 0xff, 0x24, 0x7e, 0x5e, 0xbb, 0xce, 0x24, 0xa5, 0xae, 0x2b, 0xd6, 0x65, 0xa4, + 0x96, 0x08, 0xa8, 0x5b, 0x52, 0x15, 0x7a, 0xa0, 0xaa, 0x9c, 0xd6, 0x6d, 0x8d, 0x92, 0x34, 0x1d, + 0xbb, 0x80, 0x90, 0x90, 0xb5, 0xb1, 0xa7, 0xc9, 0x8a, 0xfd, 0xe3, 0xee, 0xcc, 0x22, 0x7a, 0x40, + 0xe2, 0x1b, 0xc0, 0x11, 0xbe, 0x01, 0x67, 0x2e, 0x70, 0xe6, 0xd4, 0x63, 0x8e, 0x15, 0x87, 0x8a, + 0x38, 0x17, 0x8e, 0xfd, 0x08, 0x68, 0x67, 0x66, 0xd7, 0xbb, 0xae, 0x2d, 0x22, 0x50, 0x6f, 0x9e, + 0xf7, 0x7e, 0xef, 0x37, 0x6f, 0xde, 0xdf, 0x35, 0x18, 0x43, 0x3f, 0xe0, 0xf4, 0xdb, 0xd6, 0x38, + 0xf0, 0xb9, 0x8f, 0x8a, 0xf2, 
0xd4, 0xb8, 0x72, 0x60, 0xf3, 0xc3, 0x70, 0xbf, 0x35, 0xf4, 0xdd, + 0xab, 0x07, 0xfe, 0x81, 0x7f, 0x55, 0xa8, 0xf7, 0xc3, 0x27, 0xe2, 0x24, 0x0e, 0xe2, 0x97, 0x34, + 0xc3, 0xbf, 0xeb, 0x60, 0x7c, 0x1e, 0xd8, 0x9c, 0x12, 0xfa, 0x34, 0xa4, 0x8c, 0xa3, 0x5d, 0x00, + 0x6e, 0xbb, 0x94, 0xd1, 0xc0, 0xa6, 0xac, 0xae, 0x5f, 0xcc, 0x6d, 0x94, 0x37, 0x51, 0x4b, 0x5d, + 0xd5, 0xb7, 0x5d, 0xda, 0x13, 0x9a, 0xad, 0xc6, 0xf3, 0x97, 0x4d, 0xed, 0xcf, 0x97, 0x4d, 0xb4, + 0x17, 0x50, 0xcb, 0x71, 0xfc, 0x61, 0x3f, 0xb1, 0x22, 0x29, 0x06, 0xf4, 0x31, 0x14, 0x7b, 0x7e, + 0x18, 0x0c, 0x69, 0x7d, 0xe9, 0xa2, 0xbe, 0x51, 0xdd, 0x6c, 0xc6, 0x5c, 0xe9, 0x5b, 0x5b, 0x12, + 0xd2, 0xf1, 0x42, 0x97, 0x14, 0x99, 0xf8, 0x8d, 0x9b, 0x00, 0x53, 0x29, 0x5a, 0x86, 0x5c, 0x7b, + 0xaf, 0x5b, 0xd3, 0xd0, 0x0a, 0xe4, 0xc9, 0xe3, 0xed, 0x4e, 0x4d, 0xc7, 0x67, 0xa0, 0xa2, 0x38, + 0xd8, 0xd8, 0xf7, 0x18, 0xc5, 0xb7, 0xa0, 0x4c, 0xa8, 0x35, 0x8a, 0x5f, 0xd2, 0x82, 0xe5, 0xa7, + 0x61, 0xfa, 0x19, 0xeb, 0xf1, 0xd5, 0x8f, 0x42, 0x1a, 0x3c, 0x53, 0x30, 0x12, 0x83, 0xf0, 0x6d, + 0x30, 0xa4, 0xb9, 0xa4, 0x43, 0x57, 0x61, 0x39, 0xa0, 0x2c, 0x74, 0x78, 0x6c, 0x7f, 0x76, 0xc6, + 0x5e, 0xe2, 0x48, 0x8c, 0xc2, 0x3f, 0xe9, 0x60, 0xa4, 0xa9, 0xd1, 0x07, 0x80, 0x18, 0xb7, 0x02, + 0x3e, 0x10, 0xf1, 0xe0, 0x96, 0x3b, 0x1e, 0xb8, 0x11, 0x99, 0xbe, 0x91, 0x23, 0x35, 0xa1, 0xe9, + 0xc7, 0x8a, 0x1d, 0x86, 0x36, 0xa0, 0x46, 0xbd, 0x51, 0x16, 0xbb, 0x24, 0xb0, 0x55, 0xea, 0x8d, + 0xd2, 0xc8, 0x6b, 0xb0, 0xe2, 0x5a, 0x7c, 0x78, 0x48, 0x03, 0x56, 0xcf, 0x65, 0x9f, 0xb6, 0x6d, + 0xed, 0x53, 0x67, 0x47, 0x2a, 0x49, 0x82, 0xc2, 0x5d, 0xa8, 0x64, 0x9c, 0x46, 0x37, 0x4f, 0x99, + 0xe6, 0x7c, 0x94, 0xe6, 0x74, 0x42, 0x71, 0x1f, 0xd6, 0x04, 0x55, 0x8f, 0x07, 0xd4, 0x72, 0x13, + 0xc2, 0x5b, 0x73, 0x08, 0xcf, 0xbd, 0x4e, 0x78, 0xe7, 0x30, 0xf4, 0xbe, 0x9e, 0xc3, 0x7a, 0x1d, + 0x90, 0x70, 0xfd, 0x33, 0xcb, 0x09, 0x29, 0x8b, 0x03, 0xf8, 0x36, 0x80, 0x13, 0x49, 0x07, 0x9e, + 0xe5, 0x52, 0x11, 0xb8, 0x12, 0x29, 0x09, 0xc9, 0xae, 0xe5, 0x52, 0x7c, 0x13, 0xd6, 0x32, 0x46, + 0xca, 0x95, 0x77, 0xc0, 0x90, 0x56, 0xdf, 0x08, 0xb9, 0x70, 0xa6, 0x44, 0xca, 0xce, 0x14, 0x8a, + 0xd7, 0x60, 0x75, 0x3b, 0xa6, 0x89, 0x6f, 0xc3, 0x37, 0x94, 0x0f, 0x4a, 0xa8, 0xd8, 0x9a, 0x50, + 0x9e, 0xfa, 0x10, 0x93, 0x41, 0xe2, 0x04, 0xc3, 0x08, 0x6a, 0x8f, 0x19, 0x0d, 0x7a, 0xdc, 0xe2, + 0x09, 0xd5, 0x6f, 0x3a, 0xac, 0xa6, 0x84, 0x8a, 0xea, 0x12, 0x54, 0x6d, 0xef, 0x80, 0x32, 0x6e, + 0xfb, 0xde, 0x20, 0xb0, 0xb8, 0x7c, 0x92, 0x4e, 0x2a, 0x89, 0x94, 0x58, 0x9c, 0x46, 0xaf, 0xf6, + 0x42, 0x77, 0xa0, 0x42, 0x19, 0x95, 0x40, 0x9e, 0x94, 0xbc, 0xd0, 0x95, 0x11, 0x8c, 0xaa, 0xca, + 0x1a, 0xdb, 0x83, 0x19, 0xa6, 0x9c, 0x60, 0xaa, 0x59, 0x63, 0xbb, 0x9b, 0x21, 0x6b, 0xc1, 0x5a, + 0x10, 0x3a, 0x74, 0x16, 0x9e, 0x17, 0xf0, 0xd5, 0x48, 0x95, 0xc1, 0xe3, 0xaf, 0x60, 0x2d, 0x72, + 0xbc, 0x7b, 0x37, 0xeb, 0xfa, 0x39, 0x58, 0x0e, 0x19, 0x0d, 0x06, 0xf6, 0x48, 0xa5, 0xa1, 0x18, + 0x1d, 0xbb, 0x23, 0x74, 0x05, 0xf2, 0x23, 0x8b, 0x5b, 0xc2, 0xcd, 0xf2, 0xe6, 0xf9, 0x38, 0xe3, + 0xaf, 0x3d, 0x9e, 0x08, 0x18, 0xbe, 0x0f, 0x28, 0x52, 0xb1, 0x2c, 0xfb, 0x87, 0x50, 0x60, 0x91, + 0x40, 0xd5, 0xcd, 0x85, 0x34, 0xcb, 0x8c, 0x27, 0x44, 0x22, 0xf1, 0xaf, 0x3a, 0x98, 0x3b, 0x94, + 0x07, 0xf6, 0x90, 0xdd, 0xf3, 0x83, 0x74, 0xd9, 0xb3, 0x37, 0xdd, 0x7e, 0x37, 0xc1, 0x88, 0x1b, + 0x6b, 0xc0, 0x28, 0x57, 0x2d, 0x78, 0x76, 0x5e, 0x0b, 0x32, 0x52, 0x8e, 0xa1, 0x3d, 0xca, 0x71, + 0x17, 0x9a, 0x0b, 0x7d, 0x56, 0xa1, 0xb8, 0x0c, 0x45, 0x57, 0x40, 0x54, 0x2c, 0xaa, 0x31, 0xad, + 0x34, 0x24, 0x4a, 0x8b, 0xff, 0xd0, 0xe1, 0xcc, 0x4c, 
0x5b, 0x45, 0x4f, 0x78, 0x12, 0xf8, 0xae, + 0xca, 0x75, 0x3a, 0x5b, 0xd5, 0x48, 0xde, 0x55, 0xe2, 0xee, 0x28, 0x9d, 0xce, 0xa5, 0x4c, 0x3a, + 0x6f, 0x43, 0x51, 0x94, 0x76, 0x3c, 0x58, 0x56, 0x33, 0xaf, 0xda, 0xb3, 0xec, 0x60, 0x6b, 0x5d, + 0x4d, 0x7e, 0x43, 0x88, 0xda, 0x23, 0x6b, 0xcc, 0x69, 0x40, 0x94, 0x19, 0x7a, 0x1f, 0x8a, 0xc3, + 0xc8, 0x19, 0x56, 0xcf, 0x0b, 0x82, 0x4a, 0x4c, 0x90, 0xee, 0x7c, 0x05, 0xc1, 0x3f, 0xe8, 0x50, + 0x90, 0xae, 0xbf, 0xa9, 0x5c, 0x35, 0x60, 0x85, 0x7a, 0x43, 0x7f, 0x64, 0x7b, 0x07, 0xa2, 0x45, + 0x0a, 0x24, 0x39, 0x23, 0xa4, 0x4a, 0x37, 0xea, 0x05, 0x43, 0xd5, 0x67, 0x1d, 0xde, 0xea, 0x07, + 0x96, 0xc7, 0x9e, 0xd0, 0x40, 0x38, 0x96, 0x24, 0x06, 0x7f, 0x07, 0x30, 0x8d, 0x77, 0x2a, 0x4e, + 0xfa, 0x7f, 0x8b, 0x53, 0x0b, 0x96, 0x99, 0xe5, 0x8e, 0x1d, 0xd1, 0xe1, 0x99, 0x44, 0xf7, 0x84, + 0x58, 0x45, 0x2a, 0x06, 0xe1, 0x1b, 0x50, 0x4a, 0xa8, 0x23, 0xcf, 0x93, 0x89, 0x68, 0x10, 0xf1, + 0x1b, 0xad, 0x43, 0x41, 0xcc, 0x3b, 0x11, 0x08, 0x83, 0xc8, 0x03, 0x6e, 0x43, 0x51, 0xf2, 0x4d, + 0xf5, 0x72, 0xe6, 0xc8, 0x43, 0x34, 0x2b, 0xe7, 0x44, 0xb1, 0xcc, 0xa7, 0x21, 0xc4, 0x6d, 0xa8, + 0x64, 0x4a, 0x35, 0xb3, 0x7e, 0xf4, 0x53, 0xae, 0x9f, 0xa2, 0x2c, 0xdf, 0xff, 0x1d, 0x37, 0x3c, + 0x00, 0x23, 0x7d, 0x09, 0xba, 0x04, 0x79, 0xfe, 0x6c, 0x2c, 0x5f, 0x55, 0x9d, 0xd2, 0x09, 0x75, + 0xff, 0xd9, 0x98, 0x12, 0xa1, 0x4e, 0x22, 0x26, 0xab, 0x7d, 0x26, 0x62, 0x39, 0x21, 0x94, 0x87, + 0xf7, 0x3e, 0x85, 0x52, 0x62, 0x8c, 0x4a, 0x50, 0xe8, 0x3c, 0x7a, 0xdc, 0xde, 0xae, 0x69, 0xa8, + 0x02, 0xa5, 0xdd, 0x87, 0xfd, 0x81, 0x3c, 0xea, 0xe8, 0x0c, 0x94, 0x49, 0xe7, 0x7e, 0xe7, 0x8b, + 0xc1, 0x4e, 0xbb, 0x7f, 0xe7, 0x41, 0x6d, 0x09, 0x21, 0xa8, 0x4a, 0xc1, 0xee, 0x43, 0x25, 0xcb, + 0x6d, 0xfe, 0x5c, 0x80, 0x95, 0xb8, 0xeb, 0xd0, 0x0d, 0xc8, 0xef, 0x85, 0xec, 0x10, 0xad, 0xcf, + 0xfb, 0x02, 0x6a, 0x9c, 0x9d, 0x91, 0xaa, 0xaa, 0xd3, 0xd0, 0x47, 0x50, 0x10, 0xfb, 0x16, 0xcd, + 0xfd, 0x7c, 0x69, 0xcc, 0xff, 0x28, 0xc1, 0x1a, 0xba, 0x0b, 0xe5, 0xd4, 0x9e, 0x5e, 0x60, 0x7d, + 0x21, 0x23, 0xcd, 0xae, 0x74, 0xac, 0x5d, 0xd3, 0xd1, 0x03, 0x28, 0xa7, 0x56, 0x2c, 0x6a, 0x64, + 0xd2, 0x95, 0x59, 0xd6, 0x53, 0xae, 0x39, 0x3b, 0x19, 0x6b, 0xa8, 0x03, 0x30, 0xdd, 0xae, 0xe8, + 0x7c, 0x06, 0x9c, 0x5e, 0xc3, 0x8d, 0xc6, 0x3c, 0x55, 0x42, 0xb3, 0x05, 0xa5, 0x64, 0xb7, 0xa0, + 0xfa, 0x9c, 0x75, 0x23, 0x49, 0x16, 0x2f, 0x22, 0xac, 0xa1, 0x7b, 0x60, 0xb4, 0x1d, 0xe7, 0x34, + 0x34, 0x8d, 0xb4, 0x86, 0xcd, 0xf2, 0x38, 0x70, 0x6e, 0xc1, 0x38, 0x47, 0x97, 0xb3, 0x63, 0x7b, + 0xd1, 0x8e, 0x6a, 0xbc, 0xfb, 0xaf, 0xb8, 0xe4, 0xb6, 0x1d, 0xa8, 0x66, 0x47, 0x13, 0x5a, 0xf4, + 0x7d, 0xd5, 0x30, 0x13, 0xc5, 0xfc, 0x59, 0xa6, 0x6d, 0xe8, 0x5b, 0x9f, 0x1c, 0x1d, 0x9b, 0xda, + 0x8b, 0x63, 0x53, 0x7b, 0x75, 0x6c, 0xea, 0xdf, 0x4f, 0x4c, 0xfd, 0x97, 0x89, 0xa9, 0x3f, 0x9f, + 0x98, 0xfa, 0xd1, 0xc4, 0xd4, 0xff, 0x9a, 0x98, 0xfa, 0xdf, 0x13, 0x53, 0x7b, 0x35, 0x31, 0xf5, + 0x1f, 0x4f, 0x4c, 0xed, 0xe8, 0xc4, 0xd4, 0x5e, 0x9c, 0x98, 0xda, 0x97, 0xc5, 0xa1, 0x63, 0x53, + 0x8f, 0xef, 0x17, 0xc5, 0xdf, 0x87, 0xeb, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x9b, 0x35, + 0x18, 0x85, 0x0c, 0x00, 0x00, } func (x MatchType) String() string { @@ -2906,7 +2903,7 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", + Metadata: "cortex.proto", } func (m *WriteRequest) Marshal() (dAtA []byte, err error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go 
b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index c415085950606edaeb08cf0321b70a0317c483e4..ab7980b9b2c2643492ae35b3fac76026bf99febc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -38,6 +38,15 @@ func hashAdd(h uint64, s string) uint64 { return h } +// hashAddString adds a string to a fnv64a hash value, returning the updated hash. +func hashAddString(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + // hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. func hashAddByte(h uint64, b byte) uint64 { h ^= uint64(b) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go index 3c89eb53d64b0fad168b3383e4e1f485d1fa1243..3b7d1f7e3d804b02b591ab3a1583e2c86640d6a1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "net/http" + "strings" "time" "github.com/go-kit/kit/log/level" @@ -12,6 +13,7 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/cortexproject/cortex/pkg/util" + "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/instrument" ) @@ -29,12 +31,13 @@ type ConsulConfig struct { } // RegisterFlags adds the flags required to config this to the given FlagSet -func (cfg *ConsulConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Host, "consul.hostname", "localhost:8500", "Hostname and port of Consul.") - f.StringVar(&cfg.Prefix, "consul.prefix", "collectors/", "Prefix for keys in Consul.") - f.StringVar(&cfg.ACLToken, "consul.acltoken", "", "ACL Token used to interact with Consul.") - f.DurationVar(&cfg.HTTPClientTimeout, "consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to consul") - f.BoolVar(&cfg.ConsistentReads, "consul.consistent-reads", true, "Enable consistent reads to consul.") +// If prefix is not an empty string it should end with a period. +func (cfg *ConsulConfig) RegisterFlags(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Host, prefix+"consul.hostname", "localhost:8500", "Hostname and port of Consul.") + f.StringVar(&cfg.Prefix, prefix+"consul.prefix", "collectors/", "Prefix for keys in Consul. Should end with a /.") + f.StringVar(&cfg.ACLToken, prefix+"consul.acltoken", "", "ACL Token used to interact with Consul.") + f.DurationVar(&cfg.HTTPClientTimeout, prefix+"consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to consul") + f.BoolVar(&cfg.ConsistentReads, prefix+"consul.consistent-reads", true, "Enable consistent reads to consul.") } type kv interface { @@ -120,15 +123,19 @@ func (c *consulClient) cas(ctx context.Context, key string, f CASCallback) error intermediate, retry, err = f(intermediate) if err != nil { - level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err) if !retry { + if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 { + level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err) + } return err } continue } + // Treat the callback returning a nil intermediate as a decision not to + // write to Consul; this is not an error.
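+ // Returning a nil intermediate together with a nil error therefore ends + // the CAS attempt successfully without writing anything back to Consul.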
if intermediate == nil { - panic("Callback must instantiate value!") + return nil } bytes, err := c.codec.Encode(intermediate) @@ -203,6 +210,50 @@ func (c *consulClient) WatchKey(ctx context.Context, key string, f func(interfac } } +// WatchPrefix will watch a given prefix in Consul for new keys and changes to existing keys under that prefix. +// When a value under the prefix changes, the f callback is called with the deserialised value. +// Values are deserialised with the client's codec. This function blocks until the context is cancelled. +func (c *consulClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { + var ( + backoff = util.NewBackoff(ctx, backoffConfig) + index = uint64(0) + ) + for backoff.Ongoing() { + queryOptions := &consul.QueryOptions{ + RequireConsistent: true, + WaitIndex: index, + WaitTime: longPollDuration, + } + + kvps, meta, err := c.kv.List(prefix, queryOptions.WithContext(ctx)) + if err != nil || kvps == nil { + level.Error(util.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err) + backoff.Wait() + continue + } + backoff.Reset() + // Skip if the index is the same as last time, because the key values are + // guaranteed to be the same as last time. + if index == meta.LastIndex { + continue + } + + index = meta.LastIndex + for _, kvp := range kvps { + out, err := c.codec.Decode(kvp.Value) + if err != nil { + level.Error(util.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err) + continue + } + // Strip the prefix from the front of the key. + key := strings.TrimPrefix(kvp.Key, prefix) + if !f(key, out) { + return + } + } + } +} + func (c *consulClient) PutBytes(ctx context.Context, key string, buf []byte) error { _, err := c.kv.Put(&consul.KVPair{ Key: key, @@ -245,6 +296,13 @@ func (c *prefixedConsulClient) WatchKey(ctx context.Context, key string, f func( c.consul.WatchKey(ctx, c.prefix+key, f) } +// WatchPrefix watches a prefix. For a prefixed client it appends the prefix argument to the client's prefix. +func (c *prefixedConsulClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { + c.consul.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool { + return f(strings.TrimPrefix(k, c.prefix), i) + }) +} + // PutBytes writes bytes to Consul. func (c *prefixedConsulClient) PutBytes(ctx context.Context, key string, buf []byte) error { return c.consul.PutBytes(ctx, c.prefix+key, buf) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client_mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client_mock.go index bf53418bb198887b56133517ec76ec8fed943f41..9190f9290413caa22d19ad700556c7df0ffbd224 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client_mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client_mock.go @@ -18,7 +18,7 @@ type mockKV struct { } // NewInMemoryKVClient makes a new mock consul client.
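+// The codec argument controls how values stored in the mock are encoded and decoded.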
-func NewInMemoryKVClient() KVClient { +func NewInMemoryKVClient(codec Codec) KVClient { m := mockKV{ kvps: map[string]*consul.KVPair{}, } @@ -26,7 +26,7 @@ func NewInMemoryKVClient() KVClient { go m.loop() return &consulClient{ kv: &m, - codec: ProtoCodec{Factory: ProtoDescFactory}, + codec: codec, } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kvstore.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kvstore.go index 6961bf7efdcd6e92774e2d84f45bf746f97681e9..b9d5101fb639fe64cf2f1bd6fdbac64be676f83f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kvstore.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kvstore.go @@ -2,6 +2,7 @@ package ring import ( "context" + "flag" "fmt" "sync" @@ -18,27 +19,55 @@ var inmemoryStore KVClient type KVClient interface { CAS(ctx context.Context, key string, f CASCallback) error WatchKey(ctx context.Context, key string, f func(interface{}) bool) + WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) Get(ctx context.Context, key string) (interface{}, error) PutBytes(ctx context.Context, key string, buf []byte) error } +// KVConfig is config for a KVStore currently used by the ring and HA tracker, +// where store can be consul or inmemory. +type KVConfig struct { + Store string `yaml:"store,omitempty"` + Consul ConsulConfig `yaml:"consul,omitempty"` + + Mock KVClient +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet. +// If prefix is an empty string we will register consul flags with no prefix and the +// store flag with the prefix ring, so ring.store. For everything else we pass the prefix +// to the Consul flags. +// If prefix is not an empty string it should end with a period. +func (cfg *KVConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + // We need Consul flags to not have the ring prefix to maintain compatibility. + // This needs to be fixed in the future (1.0 release maybe?) when we normalize flags. + // At the moment we have consul.<flag-name>, and ring.store, going forward it would + // be easier to have everything under ring, so ring.consul.<flag-name> + cfg.Consul.RegisterFlags(f, prefix) + if prefix == "" { + prefix = "ring." + } + f.StringVar(&cfg.Store, prefix+"store", "consul", "Backend storage to use for the ring (consul, inmemory).") +} + -// CASCallback is the type of the callback to CAS. If err is nil, out must be non-nil. +// CASCallback is the type of the callback to CAS. If err is nil and out is non-nil, out is +// written back; returning a nil out with a nil err skips the write. type CASCallback func(in interface{}) (out interface{}, retry bool, err error) -func newKVStore(cfg Config) (KVClient, error) { +// NewKVStore creates a new KV store client (inmemory or consul) based on the config, +// encoding and decoding data for storage with the given codec. +func NewKVStore(cfg KVConfig, codec Codec) (KVClient, error) { if cfg.Mock != nil { return cfg.Mock, nil } switch cfg.Store { case "consul": - codec := ProtoCodec{Factory: ProtoDescFactory} return NewConsulClient(cfg.Consul, codec) case "inmemory": // If we use the in-memory store, make sure everyone gets the same instance // within the same process. inmemoryStoreInit.Do(func() { - inmemoryStore = NewInMemoryKVClient(codec) + inmemoryStore = NewInMemoryKVClient(codec) }) return inmemoryStore, nil default: @@ -46,7 +75,7 @@ func newKVStore(cfg Config) (KVClient, error) { } } -// Codec allows the consult client to serialise and deserialise values. +// Codec allows the consul client to serialise and deserialise values.
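+// The ring and lifecycler below pass ProtoCodec{Factory: ProtoDescFactory} as the Codec; for example: +// +// store, err := NewKVStore(cfg.KVStore, ProtoCodec{Factory: ProtoDescFactory})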
type Codec interface { Decode([]byte) (interface{}, error) Encode(interface{}) ([]byte, error) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index 5a4d7b3dd903e7bad5342763b8365c5b0f748fae..e75abdef41e8553b159f1e95c61989ba30babf45 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -18,23 +18,23 @@ import ( ) var ( - consulHeartbeats = promauto.NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_consul_heartbeats_total", + consulHeartbeats = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_member_consul_heartbeats_total", Help: "The total number of heartbeats sent to consul.", - }) - tokensOwned = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_ring_tokens_owned", + }, []string{"name"}) + tokensOwned = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_member_ring_tokens_owned", Help: "The number of tokens owned in the ring.", - }) - tokensToOwn = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_ring_tokens_to_own", + }, []string{"name"}) + tokensToOwn = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_member_ring_tokens_to_own", Help: "The number of tokens to own in the ring.", - }) + }, []string{"name"}) shutdownDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "cortex_shutdown_duration_seconds", Help: "Duration (in seconds) of cortex shutdown procedure (ie transfer or flush).", Buckets: prometheus.ExponentialBuckets(10, 2, 8), // Biggest bucket is 10*2^(9-1) = 2560, or 42 mins. - }, []string{"op", "status"}) + }, []string{"op", "status", "name"}) ) // LifecyclerConfig is the config to build a Lifecycler. @@ -61,15 +61,20 @@ type LifecyclerConfig struct { // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *LifecyclerConfig) RegisterFlags(f *flag.FlagSet) { - cfg.RingConfig.RegisterFlags(f) + cfg.RegisterFlagsWithPrefix("", f) +} - f.IntVar(&cfg.NumTokens, "ingester.num-tokens", 128, "Number of tokens for each ingester.") - f.DurationVar(&cfg.HeartbeatPeriod, "ingester.heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul.") - f.DurationVar(&cfg.JoinAfter, "ingester.join-after", 0*time.Second, "Period to wait for a claim from another ingester; will join automatically after this.") - f.DurationVar(&cfg.MinReadyDuration, "ingester.min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. 
This is to work around race conditions with ingesters exiting and updating the ring.") - f.BoolVar(&cfg.ClaimOnRollout, "ingester.claim-on-rollout", false, "Send chunks to PENDING ingesters on exit.") - f.BoolVar(&cfg.NormaliseTokens, "ingester.normalise-tokens", false, "Store tokens in a normalised fashion to reduce allocations.") - f.DurationVar(&cfg.FinalSleep, "ingester.final-sleep", 30*time.Second, "Duration to sleep for before exiting, to ensure metrics are scraped.") +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.RingConfig.RegisterFlagsWithPrefix(prefix, f) + + f.IntVar(&cfg.NumTokens, prefix+"num-tokens", 128, "Number of tokens for each ingester.") + f.DurationVar(&cfg.HeartbeatPeriod, prefix+"heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul.") + f.DurationVar(&cfg.JoinAfter, prefix+"join-after", 0*time.Second, "Period to wait for a claim from another member; will join automatically after this.") + f.DurationVar(&cfg.MinReadyDuration, prefix+"min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. This is to work around race conditions with ingesters exiting and updating the ring.") + f.BoolVar(&cfg.ClaimOnRollout, prefix+"claim-on-rollout", false, "Send chunks to PENDING ingesters on exit.") + f.BoolVar(&cfg.NormaliseTokens, prefix+"normalise-tokens", false, "Store tokens in a normalised fashion to reduce allocations.") + f.DurationVar(&cfg.FinalSleep, prefix+"final-sleep", 30*time.Second, "Duration to sleep for before exiting, to ensure metrics are scraped.") hostname, err := os.Hostname() if err != nil { @@ -78,10 +83,10 @@ func (cfg *LifecyclerConfig) RegisterFlags(f *flag.FlagSet) { } cfg.InfNames = []string{"eth0", "en0"} - f.Var((*flagext.Strings)(&cfg.InfNames), "ingester.interface", "Name of network interface to read address from.") - f.StringVar(&cfg.Addr, "ingester.addr", "", "IP address to advertise in consul.") - f.IntVar(&cfg.Port, "ingester.port", 0, "port to advertise in consul (defaults to server.grpc-listen-port).") - f.StringVar(&cfg.ID, "ingester.ID", hostname, "ID to register into consul.") + f.Var((*flagext.Strings)(&cfg.InfNames), prefix+"lifecycler.interface", "Name of network interface to read address from.") + f.StringVar(&cfg.Addr, prefix+"lifecycler.addr", "", "IP address to advertise in consul.") + f.IntVar(&cfg.Port, prefix+"lifecycler.port", 0, "port to advertise in consul (defaults to server.grpc-listen-port).") + f.StringVar(&cfg.ID, prefix+"lifecycler.ID", hostname, "ID to register into consul.") } // FlushTransferer controls the shutdown of an ingester. @@ -103,8 +108,9 @@ type Lifecycler struct { actorChan chan func() // These values are initialised at startup, and never change - ID string - addr string + ID string + Addr string + RingName string // We need to remember the ingester state just in case consul goes away and comes // back empty. And it changes during lifecycle of ingester. @@ -119,7 +125,7 @@ type Lifecycler struct { } // NewLifecycler makes and starts a new Lifecycler. 
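+// The name argument identifies the ring this lifecycler joins (for example "ingester") +// and is used as the "name" label on the lifecycler's metrics.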
-func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer) (*Lifecycler, error) { +func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, name string) (*Lifecycler, error) { addr := cfg.Addr if addr == "" { var err error @@ -132,8 +138,8 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer) (*Life if port == 0 { port = *cfg.ListenPort } - - store, err := newKVStore(cfg.RingConfig) + codec := ProtoCodec{Factory: ProtoDescFactory} + store, err := NewKVStore(cfg.RingConfig.KVStore, codec) if err != nil { return nil, err } @@ -143,8 +149,9 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer) (*Life flushTransferer: flushTransferer, KVStore: store, - addr: fmt.Sprintf("%s:%d", addr, port), - ID: cfg.ID, + Addr: fmt.Sprintf("%s:%d", addr, port), + ID: cfg.ID, + RingName: name, quit: make(chan struct{}), actorChan: make(chan func()), @@ -153,7 +160,7 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer) (*Life startTime: time.Now(), } - tokensToOwn.Set(float64(cfg.NumTokens)) + tokensToOwn.WithLabelValues(l.RingName).Set(float64(cfg.NumTokens)) l.done.Add(1) go l.loop() @@ -223,7 +230,7 @@ func (i *Lifecycler) getTokens() []uint32 { } func (i *Lifecycler) setTokens(tokens []uint32) { - tokensOwned.Set(float64(len(tokens))) + tokensOwned.WithLabelValues(i.RingName).Set(float64(len(tokens))) i.stateMtx.Lock() defer i.stateMtx.Unlock() @@ -275,7 +282,7 @@ func (i *Lifecycler) Shutdown() { func (i *Lifecycler) loop() { defer func() { - level.Info(util.Logger).Log("msg", "Ingester.loop() exited gracefully") + level.Info(util.Logger).Log("msg", "member.loop() exited gracefully") i.done.Done() }() @@ -308,7 +315,7 @@ loop: } case <-heartbeatTicker.C: - consulHeartbeats.Inc() + consulHeartbeats.WithLabelValues(i.RingName).Inc() if err := i.updateConsul(context.Background()); err != nil { level.Error(util.Logger).Log("msg", "failed to write to consul, sleeping", "err", err) } @@ -336,7 +343,7 @@ heartbeatLoop: for { select { case <-heartbeatTicker.C: - consulHeartbeats.Inc() + consulHeartbeats.WithLabelValues(i.RingName).Inc() if err := i.updateConsul(context.Background()); err != nil { level.Error(util.Logger).Log("msg", "failed to write to consul, sleeping", "err", err) } @@ -371,7 +378,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { if !ok { // Either we are a new ingester, or consul must have restarted level.Info(util.Logger).Log("msg", "entry not found in ring, adding with no tokens") - ringDesc.AddIngester(i.ID, i.addr, []uint32{}, i.GetState(), i.cfg.NormaliseTokens) + ringDesc.AddIngester(i.ID, i.Addr, []uint32{}, i.GetState(), i.cfg.NormaliseTokens) return ringDesc, true, nil } @@ -403,7 +410,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context) error { newTokens := GenerateTokens(i.cfg.NumTokens-len(myTokens), takenTokens) i.setState(ACTIVE) - ringDesc.AddIngester(i.ID, i.addr, newTokens, i.GetState(), i.cfg.NormaliseTokens) + ringDesc.AddIngester(i.ID, i.Addr, newTokens, i.GetState(), i.cfg.NormaliseTokens) tokens := append(myTokens, newTokens...) 
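+ // Keep the combined token list sorted before recording it in the ring.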
sort.Sort(sortableUint32(tokens)) @@ -428,11 +435,11 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { if !ok { // consul must have restarted level.Info(util.Logger).Log("msg", "found empty ring, inserting tokens") - ringDesc.AddIngester(i.ID, i.addr, i.getTokens(), i.GetState(), i.cfg.NormaliseTokens) + ringDesc.AddIngester(i.ID, i.Addr, i.getTokens(), i.GetState(), i.cfg.NormaliseTokens) } else { ingesterDesc.Timestamp = time.Now().Unix() ingesterDesc.State = i.GetState() - ingesterDesc.Addr = i.addr + ingesterDesc.Addr = i.Addr ringDesc.Ingesters[i.ID] = ingesterDesc } @@ -464,17 +471,17 @@ func (i *Lifecycler) processShutdown(ctx context.Context) { transferStart := time.Now() if err := i.flushTransferer.TransferOut(ctx); err != nil { level.Error(util.Logger).Log("msg", "Failed to transfer chunks to another ingester", "err", err) - shutdownDuration.WithLabelValues("transfer", "fail").Observe(time.Since(transferStart).Seconds()) + shutdownDuration.WithLabelValues("transfer", "fail", i.RingName).Observe(time.Since(transferStart).Seconds()) } else { flushRequired = false - shutdownDuration.WithLabelValues("transfer", "success").Observe(time.Since(transferStart).Seconds()) + shutdownDuration.WithLabelValues("transfer", "success", i.RingName).Observe(time.Since(transferStart).Seconds()) } } if flushRequired { flushStart := time.Now() i.flushTransferer.Flush() - shutdownDuration.WithLabelValues("flush", "success").Observe(time.Since(flushStart).Seconds()) + shutdownDuration.WithLabelValues("flush", "success", i.RingName).Observe(time.Since(flushStart).Seconds()) } // Sleep so the shutdownDuration metric can be collected. @@ -483,6 +490,8 @@ func (i *Lifecycler) processShutdown(ctx context.Context) { // unregister removes our entry from consul. 
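+// The removal is performed with a CAS on ConsulKey, so it cannot race with other members' updates to the ring.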
func (i *Lifecycler) unregister(ctx context.Context) error { + level.Debug(util.Logger).Log("msg", "unregistering member from ring") + return i.KVStore.CAS(ctx, ConsulKey, func(in interface{}) (out interface{}, retry bool, err error) { if in == nil { return nil, false, fmt.Errorf("found empty ring when trying to unregister") diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index feb4880d06be48fa132dd9bcb8da65274061a211..61c1aa3128763a48b9effd81d04f1539b47ff57d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -56,25 +56,27 @@ var ErrEmptyRing = errors.New("empty ring") // Config for a Ring type Config struct { - Consul ConsulConfig `yaml:"consul,omitempty"` - Store string `yaml:"store,omitempty"` + KVStore KVConfig `yaml:"kvstore,omitempty"` HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout,omitempty"` ReplicationFactor int `yaml:"replication_factor,omitempty"` - - Mock KVClient } -// RegisterFlags adds the flags required to config this to the given FlagSet +// RegisterFlags adds the flags required to config this to the given FlagSet with a specified prefix func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.Consul.RegisterFlags(f) + cfg.RegisterFlagsWithPrefix("", f) +} - f.StringVar(&cfg.Store, "ring.store", "consul", "Backend storage to use for the ring (consul, inmemory).") - f.DurationVar(&cfg.HeartbeatTimeout, "ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.") - f.IntVar(&cfg.ReplicationFactor, "distributor.replication-factor", 3, "The number of ingesters to write to and read from.") +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet with a specified prefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.KVStore.RegisterFlagsWithPrefix(prefix, f) + + f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.") + f.IntVar(&cfg.ReplicationFactor, prefix+"distributor.replication-factor", 3, "The number of ingesters to write to and read from.") } // Ring holds the information about the members of the consistent hash ring. 
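+// The ring's name is attached as a "name" label to each of the metrics below, so +// metrics from multiple rings in one process can be told apart.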
type Ring struct { + name string cfg Config KVClient KVClient done chan struct{} @@ -83,47 +85,48 @@ type Ring struct { mtx sync.RWMutex ringDesc *Desc - ingesterOwnershipDesc *prometheus.Desc - numIngestersDesc *prometheus.Desc - totalTokensDesc *prometheus.Desc - numTokensDesc *prometheus.Desc + memberOwnershipDesc *prometheus.Desc + numMembersDesc *prometheus.Desc + totalTokensDesc *prometheus.Desc + numTokensDesc *prometheus.Desc } // New creates a new Ring -func New(cfg Config) (*Ring, error) { +func New(cfg Config, name string) (*Ring, error) { if cfg.ReplicationFactor <= 0 { return nil, fmt.Errorf("ReplicationFactor must be greater than zero: %d", cfg.ReplicationFactor) } - - store, err := newKVStore(cfg) + codec := ProtoCodec{Factory: ProtoDescFactory} + store, err := NewKVStore(cfg.KVStore, codec) if err != nil { return nil, err } r := &Ring{ + name: name, cfg: cfg, KVClient: store, done: make(chan struct{}), ringDesc: &Desc{}, - ingesterOwnershipDesc: prometheus.NewDesc( - "cortex_ring_ingester_ownership_percent", - "The percent ownership of the ring by ingester", - []string{"ingester"}, nil, + memberOwnershipDesc: prometheus.NewDesc( + "cortex_ring_member_ownership_percent", + "The percent ownership of the ring by member", + []string{"member", "name"}, nil, ), - numIngestersDesc: prometheus.NewDesc( - "cortex_ring_ingesters", - "Number of ingesters in the ring", - []string{"state"}, nil, + numMembersDesc: prometheus.NewDesc( + "cortex_ring_members", + "Number of members in the ring", + []string{"state", "name"}, nil, ), totalTokensDesc: prometheus.NewDesc( "cortex_ring_tokens_total", "Number of tokens in the ring", - nil, nil, + []string{"name"}, nil, ), numTokensDesc: prometheus.NewDesc( "cortex_ring_tokens_owned", - "The number of tokens in the ring owned by the ingester", - []string{"ingester"}, nil, + "The number of tokens in the ring owned by the member", + []string{"member", "name"}, nil, ), } var ctx context.Context @@ -293,8 +296,8 @@ func (r *Ring) search(key uint32) int { // Describe implements prometheus.Collector. func (r *Ring) Describe(ch chan<- *prometheus.Desc) { - ch <- r.ingesterOwnershipDesc - ch <- r.numIngestersDesc + ch <- r.memberOwnershipDesc + ch <- r.numMembersDesc ch <- r.totalTokensDesc ch <- r.numTokensDesc } @@ -333,16 +336,18 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { numTokens, ownedRange := countTokens(r.ringDesc) for id, totalOwned := range ownedRange { ch <- prometheus.MustNewConstMetric( - r.ingesterOwnershipDesc, + r.memberOwnershipDesc, prometheus.GaugeValue, float64(totalOwned)/float64(math.MaxUint32), id, + r.name, ) ch <- prometheus.MustNewConstMetric( r.numTokensDesc, prometheus.GaugeValue, float64(numTokens[id]), id, + r.name, ) } @@ -364,15 +369,17 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { for state, count := range byState { ch <- prometheus.MustNewConstMetric( - r.numIngestersDesc, + r.numMembersDesc, prometheus.GaugeValue, float64(count), state, + r.name, ) } ch <- prometheus.MustNewConstMetric( r.totalTokensDesc, prometheus.GaugeValue, float64(len(r.ringDesc.Tokens)), + r.name, ) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go index df54ce305a816d972f7335555b55396a9e1a941b..ea26b706badf963925cccb639b03cee7073be0cb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: github.com/cortexproject/cortex/pkg/ring/ring.proto +// source: ring.proto package ring @@ -50,7 +50,7 @@ var IngesterState_value = map[string]int32{ } func (IngesterState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7ebe6ffe1686e76b, []int{0} + return fileDescriptor_26381ed67e202a6e, []int{0} } type Desc struct { @@ -61,7 +61,7 @@ type Desc struct { func (m *Desc) Reset() { *m = Desc{} } func (*Desc) ProtoMessage() {} func (*Desc) Descriptor() ([]byte, []int) { - return fileDescriptor_7ebe6ffe1686e76b, []int{0} + return fileDescriptor_26381ed67e202a6e, []int{0} } func (m *Desc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -114,7 +114,7 @@ type IngesterDesc struct { func (m *IngesterDesc) Reset() { *m = IngesterDesc{} } func (*IngesterDesc) ProtoMessage() {} func (*IngesterDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_7ebe6ffe1686e76b, []int{1} + return fileDescriptor_26381ed67e202a6e, []int{1} } func (m *IngesterDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -179,7 +179,7 @@ type TokenDesc struct { func (m *TokenDesc) Reset() { *m = TokenDesc{} } func (*TokenDesc) ProtoMessage() {} func (*TokenDesc) Descriptor() ([]byte, []int) { - return fileDescriptor_7ebe6ffe1686e76b, []int{2} + return fileDescriptor_26381ed67e202a6e, []int{2} } func (m *TokenDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -230,40 +230,37 @@ func init() { proto.RegisterType((*TokenDesc)(nil), "ring.TokenDesc") } -func init() { - proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_7ebe6ffe1686e76b) -} +func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } -var fileDescriptor_7ebe6ffe1686e76b = []byte{ - // 440 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9, - 0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a, - 0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09, - 0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac, - 0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44, - 0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59, - 0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1, - 0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06, - 0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c, - 0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5, - 0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf, - 0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1, - 0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb, - 0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86, - 0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2, - 0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1, - 0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 
0x79, - 0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e, - 0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb, - 0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d, - 0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff, - 0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c, - 0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd, - 0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1, - 0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a, - 0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00, +var fileDescriptor_26381ed67e202a6e = []byte{ + // 418 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x4d, 0x6e, 0xd3, 0x40, + 0x14, 0xc7, 0xe7, 0xf9, 0x8b, 0xf8, 0x85, 0x14, 0x6b, 0x40, 0xc8, 0x44, 0x68, 0xb0, 0xb2, 0x32, + 0x48, 0x75, 0xa5, 0xc0, 0x02, 0x21, 0x75, 0xd1, 0x50, 0x0b, 0x39, 0x42, 0xa1, 0x32, 0x55, 0xf7, + 0x4e, 0x3b, 0x18, 0xab, 0xc4, 0xae, 0xec, 0x09, 0x52, 0x77, 0xdc, 0x00, 0x8e, 0xc1, 0x49, 0x50, + 0x97, 0x59, 0x76, 0x85, 0x88, 0xb3, 0x61, 0xd9, 0x23, 0xa0, 0x19, 0xc7, 0x09, 0xd9, 0xbd, 0xdf, + 0xfc, 0x3f, 0x66, 0x9e, 0x65, 0xc4, 0x32, 0xcb, 0xd3, 0xe0, 0xaa, 0x2c, 0x44, 0x41, 0x0d, 0x39, + 0xf7, 0xf7, 0xd3, 0x4c, 0x7c, 0x9e, 0x4f, 0x83, 0xf3, 0x62, 0x76, 0x90, 0x16, 0x69, 0x71, 0xa0, + 0xc4, 0xe9, 0xfc, 0x93, 0x22, 0x05, 0x6a, 0x6a, 0x42, 0x83, 0x5f, 0x80, 0xc6, 0x31, 0xaf, 0xce, + 0xe9, 0x21, 0xda, 0x59, 0x9e, 0xf2, 0x4a, 0xf0, 0xb2, 0x72, 0xc1, 0xd3, 0xfd, 0xee, 0xf0, 0x49, + 0xa0, 0xda, 0xa5, 0x1c, 0x44, 0xad, 0x16, 0xe6, 0xa2, 0xbc, 0x1e, 0x19, 0x37, 0xbf, 0x9f, 0x91, + 0x78, 0x9b, 0xa0, 0xfb, 0x68, 0x89, 0xe2, 0x92, 0xe7, 0x95, 0xab, 0xa9, 0xec, 0x83, 0x26, 0x7b, + 0x2a, 0xcf, 0x64, 0xc1, 0x3a, 0xb1, 0x36, 0xf5, 0x4f, 0x70, 0x6f, 0xb7, 0x91, 0x3a, 0xa8, 0x5f, + 0xf2, 0x6b, 0x17, 0x3c, 0xf0, 0xed, 0x58, 0x8e, 0xd4, 0x47, 0xf3, 0x6b, 0xf2, 0x65, 0xce, 0x5d, + 0xcd, 0x03, 0xbf, 0x3b, 0xa4, 0x4d, 0x63, 0x1b, 0x93, 0xa5, 0x71, 0x63, 0x78, 0xa3, 0xbd, 0x86, + 0xc1, 0x77, 0xc0, 0xfb, 0xff, 0x6b, 0x94, 0xa2, 0x91, 0x5c, 0x5c, 0x94, 0xeb, 0x46, 0x35, 0xd3, + 0xa7, 0x68, 0x8b, 0x6c, 0xc6, 0x2b, 0x91, 0xcc, 0xae, 0x54, 0xad, 0x1e, 0x6f, 0x0f, 0xe8, 0x73, + 0x34, 0x2b, 0x91, 0x08, 0xee, 0xea, 0x1e, 0xf8, 0x7b, 0xc3, 0x87, 0xbb, 0x17, 0x7e, 0x94, 0x52, + 0xdc, 0x38, 0xe8, 0xe3, 0xcd, 0xba, 0x96, 0xa7, 0xfb, 0xbd, 0x76, 0xaf, 0xb1, 0xd1, 0x31, 0x1c, + 0x73, 0x6c, 0x74, 0x4c, 0xc7, 0x1a, 0x1c, 0xa2, 0xbd, 0x59, 0x9f, 0x3e, 0x42, 0x53, 0x59, 0xd4, + 0x73, 0x7a, 0x71, 0x03, 0xb4, 0x8f, 0x9d, 0xf6, 0x13, 0xaa, 0xe7, 0xd8, 0xf1, 0x86, 0x5f, 0x8c, + 0xb0, 0xb7, 0x73, 0x35, 0x45, 0xb4, 0x8e, 0xde, 0x9e, 0x46, 0x67, 0xa1, 0x43, 0x68, 0x17, 0xef, + 0xbd, 0x0f, 0x8f, 0xce, 0xa2, 0xc9, 0x3b, 0x07, 0x24, 0x9c, 0x84, 0x93, 0x63, 0x09, 0x9a, 0x84, + 0xf1, 0x87, 0x68, 0x22, 0x41, 0x1f, 0xbd, 0x5a, 0x2c, 0x19, 0xb9, 0x5d, 0x32, 0x72, 0xb7, 0x64, + 0xf0, 0xad, 0x66, 0xf0, 0xb3, 0x66, 0x70, 0x53, 0x33, 0x58, 0xd4, 0x0c, 0xfe, 0xd4, 0x0c, 0xfe, + 0xd6, 0x8c, 0xdc, 0xd5, 0x0c, 0x7e, 0xac, 0x18, 0x59, 0xac, 0x18, 0xb9, 0x5d, 0x31, 0x32, 0xb5, + 0xd4, 0xaf, 0xf1, 0xf2, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x72, 
0x58, 0x13, 0x3d, 0x5d, 0x02, + 0x00, 0x00, } func (x IngesterState) String() string { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/labels.go b/vendor/github.com/cortexproject/cortex/pkg/util/labels.go new file mode 100644 index 0000000000000000000000000000000000000000..2177268af0b3cafdce2d28d999ecdfe6ba89406d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/labels.go @@ -0,0 +1,16 @@ +package util + +import ( + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" +) + +// LabelsToMetric converts a Labels to a Metric. +// Don't do this on any performance-sensitive paths. +func LabelsToMetric(ls labels.Labels) model.Metric { + m := make(model.Metric, len(ls)) + for _, l := range ls { + m[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + return m +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 733a65cd56c7fb9e76660c5713e68d818e315c2b..189bf64e1ec189f4e08aa668148d9ef0b730c0ef 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -11,6 +11,9 @@ type Limits struct { // Distributor enforced limits. IngestionRate float64 `yaml:"ingestion_rate"` IngestionBurstSize int `yaml:"ingestion_burst_size"` + AcceptHASamples bool `yaml:"accept_ha_samples"` + HAClusterLabel string `yaml:"ha_cluster_label"` + HAReplicaLabel string `yaml:"ha_replica_label"` MaxLabelNameLength int `yaml:"max_label_name_length"` MaxLabelValueLength int `yaml:"max_label_value_length"` MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series"` @@ -40,6 +43,9 @@ type Limits struct { func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.Float64Var(&l.IngestionRate, "distributor.ingestion-rate-limit", 25000, "Per-user ingestion rate limit in samples per second.") f.IntVar(&l.IngestionBurstSize, "distributor.ingestion-burst-size", 50000, "Per-user allowed ingestion burst size (in number of samples). Warning, very high limits will be reset every -distributor.limiter-reload-period.") + f.BoolVar(&l.AcceptHASamples, "distributor.accept-ha-samples", false, "Per-user flag to enable handling of samples with external labels for identifying replicas in an HA Prometheus setup.") + f.StringVar(&l.HAReplicaLabel, "ha-tracker.replica", "__replica__", "Prometheus label to look for in samples to identify a Prometheus HA replica.") + f.StringVar(&l.HAClusterLabel, "ha-tracker.cluster", "cluster", "Prometheus label to look for in samples to identify a Prometheus HA cluster.") f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names") f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value.
This setting also applies to the metric name") f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go index 7969aed12fe5330c5dbb5578a88530594b86ca1c..bd2653ad4d4ea67a640e5490ebf85da0daa3971a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go @@ -156,6 +156,16 @@ func (o *Overrides) getDuration(userID string, f func(*Limits) time.Duration) ti return f(override) } +func (o *Overrides) getString(userID string, f func(*Limits) string) string { + o.overridesMtx.RLock() + defer o.overridesMtx.RUnlock() + override, ok := o.overrides[userID] + if !ok { + return f(&o.Defaults) + } + return f(override) +} + // IngestionRate returns the limit on ingester rate (samples per second). func (o *Overrides) IngestionRate(userID string) float64 { return o.getFloat(userID, func(l *Limits) float64 { @@ -170,6 +180,27 @@ func (o *Overrides) IngestionBurstSize(userID string) int { }) } +// AcceptHASamples returns whether the distributor should track and accept samples from HA replicas for this user. +func (o *Overrides) AcceptHASamples(userID string) bool { + return o.getBool(userID, func(l *Limits) bool { + return l.AcceptHASamples + }) +} + +// HAReplicaLabel returns the replica label to look for when deciding whether to accept a sample from a Prometheus HA replica. +func (o *Overrides) HAReplicaLabel(userID string) string { + return o.getString(userID, func(l *Limits) string { + return l.HAReplicaLabel + }) +} + +// HAClusterLabel returns the cluster label to look for when deciding whether to accept a sample from a Prometheus HA replica. +func (o *Overrides) HAClusterLabel(userID string) string { + return o.getString(userID, func(l *Limits) string { + return l.HAClusterLabel + }) +} + // MaxLabelNameLength returns maximum length a label name can be. func (o *Overrides) MaxLabelNameLength(userID string) int { return o.getInt(userID, func(l *Limits) int {