Commit f6d5ebf5 authored by Tom Wilkie

Document how to get promtail running locally.


Signed-off-by: Tom Wilkie <tom.wilkie@gmail.com>
parent ae488b4d
@@ -6,3 +6,4 @@ mixin/vendor/
cmd/tempo/tempo
cmd/promtail/promtail
/tempo
/promtail
@@ -8,11 +8,25 @@ not index the contents of the logs, but rather a set of labels for each log stream
## Run it locally
Tempo can be run in a single host, no-dependencies mode using the following commands:
Tempo can be run in a single host, no-dependencies mode using the following commands.
Tempo consists of 3 components: `tempo` is the main server, responsible for storing
logs and processing queries; `promtail` is the agent, responsible for gathering logs
and sending them to tempo; and `grafana` is the UI.
To run tempo, use the following commands:
```
$ go build ./cmd/tempo
$ ./tempo -config.file=./docs/local.yaml
$ ./tempo -config.file=./docs/tempo-local-config.yaml
...
```
To run promtail, use the following commands:
```
$ go build ./cmd/promtail
$ ./promtail -config.file=./docs/promtail-local-config.yaml -positions.file=./positions.yaml -client.url=http://localhost/api/prom/push
...
```
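Once both processes are running, you can sanity-check the setup by hitting one of the API endpoints documented below. A minimal sketch in Go; that tempo's HTTP server listens on port 80 of localhost is an assumption taken from the `-client.url` flag above:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Assumes tempo is serving HTTP on localhost:80, as implied by the
	// -client.url flag passed to promtail above.
	resp, err := http.Get("http://localhost/api/prom/label")
	if err != nil {
		log.Fatalf("querying tempo: %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response: %v", err)
	}
	// Prints the HTTP status and the JSON list of label names.
	fmt.Printf("%s %s\n", resp.Status, body)
}
```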
@@ -89,73 +103,3 @@ Args:
<query> eg '{foo="bar",baz="blip"}'
[<regex>]
```
## API
*nb* Authentication is out of scope for this project. You are expected to run an
authenticating reverse proxy in front of our services, such as an Nginx with basic
auth or an OAuth2 proxy.
There are 4 API endpoints:
- `POST /api/prom/push`
For sending log entries; expects a snappy-compressed protobuf in the HTTP body.
- `GET /api/prom/query`
For doing queries; accepts the following parameters in the query string:
- `query`: a logQL query
- `limit`: max number of entries to return
- `start`: the start time for the query, as a nanosecond Unix epoch (nanoseconds since 1970)
- `end`: the end time for the query, as a nanosecond Unix epoch (nanoseconds since 1970)
- `direction`: `forward` or `backward`, useful when specifying a limit
- `regexp`: a regex to filter the returned results; this will eventually be rolled into the query language
Responses look like this:
```
{
"streams": [
{
"labels": "{instance=\"...\", job=\"...\", namespace=\"...\"}",
"entries": [
{
"timestamp": "2018-06-27T05:20:28.699492635Z",
"line": "..."
},
...
]
},
...
]
}
```
- `GET /api/prom/label`
For retrieving the names of the labels one can query on.
Responses look like this:
```
{
"values": [
"instance",
"job",
...
]
}
```
- `GET /api/prom/label/<name>/values`
For retrieving the label values one can query on.
Responses look like this:
```
{
"values": [
"default",
"cortex-ops",
...
]
}
```
@@ -62,7 +62,7 @@ func readConfig(filename string, cfg *tempo.Config) error {
return errors.Wrap(err, "Error reading config file: %v")
}
if err := yaml.Unmarshal(buf, &cfg); err != nil {
if err := yaml.UnmarshalStrict(buf, &cfg); err != nil {
return errors.Wrap(err, "Error reading config file: %v")
}
return nil
......
## API
*nb* Authentication is out of scope for this project. You are expected to run an
authenticating reverse proxy in front of our services, such as an Nginx with basic
auth or an OAuth2 proxy.
There are 4 API endpoints:
- `POST /api/prom/push`
For sending log entries; expects a snappy-compressed protobuf in the HTTP body.
- `GET /api/prom/query`
For doing queries; accepts the following parameters in the query string (a Go sketch of calling this endpoint follows the endpoint list):
- `query`: a logQL query
- `limit`: max number of entries to return
- `start`: the start time for the query, as a nanosecond Unix epoch (nanoseconds since 1970)
- `end`: the end time for the query, as a nanosecond Unix epoch (nanoseconds since 1970)
- `direction`: `forward` or `backward`, useful when specifying a limit
- `regexp`: a regex to filter the returned results; this will eventually be rolled into the query language
Responses look like this:
```
{
"streams": [
{
"labels": "{instance=\"...\", job=\"...\", namespace=\"...\"}",
"entries": [
{
"timestamp": "2018-06-27T05:20:28.699492635Z",
"line": "..."
},
...
]
},
...
]
}
```
- `GET /api/prom/label`
For retrieving the names of the labels one can query on.
Responses look like this:
```
{
"values": [
"instance",
"job",
...
]
}
```
- `GET /api/prom/label/<name>/values`
For retrieving the label values one can query on.
Responses look like this:
```
{
"values": [
"default",
"cortex-ops",
...
]
}
```
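For illustration, here is a rough sketch of calling the query endpoint from Go and decoding the response shape shown above. The localhost base URL is an assumption, and the structs below simply mirror the example JSON rather than any official client types:

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"time"
)

// These types mirror the example response above; they are not part of the API package.
type entry struct {
	Timestamp time.Time `json:"timestamp"`
	Line      string    `json:"line"`
}

type stream struct {
	Labels  string  `json:"labels"`
	Entries []entry `json:"entries"`
}

type queryResponse struct {
	Streams []stream `json:"streams"`
}

func main() {
	// Build the query string described above; start/end are nanosecond Unix epochs.
	params := url.Values{}
	params.Set("query", `{job="system"}`)
	params.Set("limit", "10")
	params.Set("start", fmt.Sprint(time.Now().Add(-time.Hour).UnixNano()))
	params.Set("end", fmt.Sprint(time.Now().UnixNano()))
	params.Set("direction", "backward")

	// Assumes a local tempo listening on port 80.
	resp, err := http.Get("http://localhost/api/prom/query?" + params.Encode())
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}
	defer resp.Body.Close()

	var qr queryResponse
	if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
		log.Fatalf("decoding response: %v", err)
	}
	for _, s := range qr.Streams {
		fmt.Println(s.Labels)
		for _, e := range s.Entries {
			fmt.Println(" ", e.Timestamp.Format(time.RFC3339Nano), e.Line)
		}
	}
}
```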
scrape_configs:
- job_name: system
  static_configs:
  - targets:
    - localhost
    labels:
      job: system
      __path__: /var/log/system.log
File moved
@@ -4,20 +4,17 @@ import (
"fmt"
"io/ioutil"
"path/filepath"
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
sd_config "github.com/prometheus/prometheus/discovery/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
// Config for promtail, describing what files to watch.
type Config struct {
ScrapeConfig []ScrapeConfig `yaml:"scrape_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
}
// LoadConfig loads config from a file.
@@ -28,34 +25,19 @@ func LoadConfig(filename string) (*Config, error) {
}
var cfg Config
if err := yaml.Unmarshal(buf, &cfg); err != nil {
if err := yaml.UnmarshalStrict(buf, &cfg); err != nil {
return nil, err
}
return &cfg, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain Config
err := unmarshal((*plain)(c))
if err != nil {
return err
}
if err = checkOverflow(c.XXX, "config"); err != nil {
return err
}
return nil
}
// ScrapeConfig describes a job to scrape.
type ScrapeConfig struct {
JobName string `yaml:"job_name,omitempty"`
ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"`
RelabelConfigs []*config.RelabelConfig `yaml:"relabel_configs,omitempty"`
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"`
StaticConfig targetgroup.Group `yaml:"static_config"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -65,22 +47,8 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
if err = checkOverflow(c.XXX, "scrape_config"); err != nil {
return err
}
if len(c.JobName) == 0 {
return fmt.Errorf("job_name is empty")
}
return nil
}
func checkOverflow(m map[string]interface{}, ctx string) error {
if len(m) > 0 {
var keys []string
for k := range m {
keys = append(keys, k)
}
return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", "))
}
return nil
}
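The switch from `yaml.Unmarshal` to `yaml.UnmarshalStrict` above is what makes the `XXX` catch-all fields and the `checkOverflow` helper redundant: strict unmarshalling already fails on unknown keys. A minimal sketch of the difference, using `gopkg.in/yaml.v2` and a made-up struct:

```
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type cfg struct {
	JobName string `yaml:"job_name"`
}

func main() {
	data := []byte("job_name: system\nunknown_field: oops\n")

	var a cfg
	// Plain Unmarshal silently ignores unknown_field.
	fmt.Println(yaml.Unmarshal(data, &a)) // <nil>

	var b cfg
	// UnmarshalStrict reports the unknown field, so no hand-rolled
	// overflow check is needed.
	fmt.Println(yaml.UnmarshalStrict(data, &b)) // yaml: unmarshal errors: ...
}
```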
@@ -111,7 +111,7 @@ func readPositionsFile(filename string) (map[string]int64, error) {
}
var p positionsFile
if err := yaml.Unmarshal(buf, &p); err != nil {
if err := yaml.UnmarshalStrict(buf, &p); err != nil {
return nil, err
}
......
@@ -8,6 +8,7 @@ import (
"github.com/grafana/tempo/pkg/helpers"
"github.com/hpcloud/tail"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
log "github.com/sirupsen/logrus"
@@ -41,16 +42,14 @@ type Target struct {
// NewTarget create a new Target.
func NewTarget(c *Client, positions *Positions, path string, labels model.LabelSet) (*Target, error) {
log.Info("newTarget", labels)
watcher, err := fsnotify.NewWatcher()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "fsnotify.NewWatcher")
}
if err := watcher.Add(path); err != nil {
helpers.LogError("closing watcher", watcher.Close)
return nil, err
return nil, errors.Wrap(err, "watcher.Add")
}
t := &Target{
@@ -66,7 +65,7 @@ func NewTarget(c *Client, positions *Positions, path string, labels model.LabelS
// First, we're going to add all the existing files
fis, err := ioutil.ReadDir(t.path)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "ioutil.ReadDir")
}
for _, fi := range fis {
if fi.IsDir() {
......
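The changes above wrap errors rather than returning them bare, so a failure reports which call produced it. A small sketch of the effect, using `github.com/pkg/errors` as the diff does; the sample error value is arbitrary:

```
package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

func main() {
	// A bare error carries no hint about where it came from.
	base := io.ErrUnexpectedEOF
	fmt.Println(base) // unexpected EOF

	// errors.Wrap prefixes the message with context, as in
	// errors.Wrap(err, "fsnotify.NewWatcher") above.
	wrapped := errors.Wrap(base, "fsnotify.NewWatcher")
	fmt.Println(wrapped) // fsnotify.NewWatcher: unexpected EOF

	// The original error is still recoverable.
	fmt.Println(errors.Cause(wrapped) == io.ErrUnexpectedEOF) // true
}
```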
@@ -61,7 +61,6 @@ func NewTargetManager(
}
config := map[string]sd_config.ServiceDiscoveryConfig{}
for _, cfg := range scrapeConfig {
s := &syncer{
log: logger,
@@ -75,6 +74,7 @@
}
go tm.run()
go tm.manager.Run()
return tm, tm.manager.ApplyConfig(config)
}
@@ -109,10 +109,13 @@ func (s *syncer) Sync(groups []*targetgroup.Group) {
for _, group := range groups {
for _, t := range group.Targets {
level.Debug(s.log).Log("msg", "new target", "labels", t)
labels := group.Labels.Merge(t)
labels = relabel.Process(labels, s.relabelConfig...)
// Drop empty targets (drop in relabeling).
if labels == nil {
level.Debug(s.log).Log("msg", "dropping target, no labels")
continue
}
......
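The nil check above relies on `relabel.Process` returning nil once a relabel rule drops the target. A rough sketch of that behaviour, assuming the Prometheus `config` and `relabel` packages as vendored at the time; the drop rule itself is made up, and field names follow that era's API:

```
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/relabel"
)

func main() {
	labels := model.LabelSet{
		"job":      "system",
		"__path__": "/var/log/system.log",
	}

	// A hypothetical rule that drops any target whose job label is "system".
	drop := &config.RelabelConfig{
		SourceLabels: model.LabelNames{"job"},
		Regex:        config.MustNewRegexp("system"),
		Action:       config.RelabelDrop,
	}

	out := relabel.Process(labels, drop)
	// out is nil, which is exactly the case the syncer skips above.
	fmt.Println(out == nil) // true
}
```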