Merge branch 'main' of github.com:QuantumEnigmaa/loki into helm-add-dedicated-serviceaccount-ruler
QuantumEnigmaa committed Feb 25, 2025
2 parents a8eb5a6 + 290c14a commit bf151af
Showing 1,409 changed files with 80,166 additions and 24,250 deletions.
2 changes: 1 addition & 1 deletion clients/cmd/fluent-bit/loki.go
@@ -164,7 +164,7 @@ func extractLabels(records map[string]interface{}, keys []string) model.LabelSet
}
ln := model.LabelName(k)
// skips invalid name and values
if !ln.IsValid() {
if !ln.IsValidLegacy() {
continue
}
lv := model.LabelValue(fmt.Sprintf("%v", v))
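Several changes in this commit swap `IsValid()` for `IsValidLegacy()`. In recent `prometheus/common` releases, `IsValid()` can accept UTF-8 label names depending on the active name-validation scheme, while `IsValidLegacy()` always enforces the classic `[a-zA-Z_][a-zA-Z0-9_]*` rule that Loki clients relied on before. A minimal illustrative sketch (not part of this commit):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// A classic Prometheus label name: passes the legacy check.
	fmt.Println(model.LabelName("pod_name").IsValidLegacy()) // true

	// Contains a '.', so the legacy check rejects it, even though IsValid()
	// may accept it when UTF-8 label names are enabled.
	fmt.Println(model.LabelName("pod.name").IsValidLegacy()) // false
}
```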
4 changes: 2 additions & 2 deletions clients/pkg/logentry/stages/eventlogmessage.go
@@ -60,7 +60,7 @@ func validateEventLogMessageConfig(c *EventLogMessageConfig) error {
if c == nil {
return errors.New(ErrEmptyEvtLogMsgStageConfig)
}
if c.Source != nil && !model.LabelName(*c.Source).IsValid() {
if c.Source != nil && !model.LabelName(*c.Source).IsValidLegacy() {
return fmt.Errorf(ErrInvalidLabelName, *c.Source)
}
return nil
@@ -108,7 +108,7 @@ func (m *eventLogMessageStage) processEntry(extracted map[string]interface{}, ke
continue
}
mkey := parts[0]
if !model.LabelName(mkey).IsValid() {
if !model.LabelName(mkey).IsValidLegacy() {
if m.cfg.DropInvalidLabels {
if Debug {
level.Debug(m.logger).Log("msg", "invalid label parsed from message", "key", mkey)
2 changes: 1 addition & 1 deletion clients/pkg/logentry/stages/labels.go
@@ -26,7 +26,7 @@ func validateLabelsConfig(c LabelsConfig) error {
return errors.New(ErrEmptyLabelStageConfig)
}
for labelName, labelSrc := range c {
if !model.LabelName(labelName).IsValid() {
if !model.LabelName(labelName).IsValidLegacy() {
return fmt.Errorf(ErrInvalidLabelName, labelName)
}
// If no label source was specified, use the key name
34 changes: 16 additions & 18 deletions clients/pkg/promtail/discovery/consulagent/consul.go
@@ -9,14 +9,14 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net"
"net/http"
"os"
"strconv"
"strings"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
consul "github.com/hashicorp/consul/api"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/pkg/errors"
@@ -152,19 +152,19 @@ type Discovery struct {
allowStale bool
refreshInterval time.Duration
finalizer func()
logger log.Logger
logger *slog.Logger
metrics *consulMetrics
}

// NewDiscovery returns a new Discovery for the given config.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*consulMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
}

if logger == nil {
logger = log.NewNopLogger()
logger = slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
}

tls, err := config.NewTLSConfig(&conf.TLSConfig)
@@ -265,15 +265,15 @@ func (d *Discovery) getDatacenter() error {
}
info, err := d.client.Agent().Self()
if err != nil {
level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
d.logger.Error("Error retrieving datacenter name", "err", err)
d.metrics.rpcFailuresCount.Inc()
return err
}

dc, ok := info["Config"]["Datacenter"].(string)
if !ok {
err := errors.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
d.logger.Error("Error retrieving datacenter name", "err", err)
return err
}

@@ -342,7 +342,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
// entire list of services.
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, services map[string]func(error)) {
agent := d.client.Agent()
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","))

t0 := time.Now()
srvs, err := agent.Services()
@@ -357,7 +357,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
}

if err != nil {
level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
d.logger.Error("Error refreshing service list", "err", err)
d.metrics.rpcFailuresCount.Inc()
time.Sleep(retryInterval)
return
@@ -386,9 +386,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
// Check for removed services.
for name, cancel := range services {
if _, ok := discoveredServices[name]; !ok {
level.Debug(d.logger).Log(
"msg", "removing service since consul no longer has a record of it",
"name", name)
d.logger.Debug("removing service since consul no longer has a record of it", "name", name)
// Call the watch cancellation function.
cancel(errors.New("canceling service since consul no longer has a record of it"))
delete(services, name)
@@ -420,7 +418,7 @@ type consulService struct {
discovery *Discovery
client *consul.Client
tagSeparator string
logger log.Logger
logger *slog.Logger
rpcFailuresCount prometheus.Counter
serviceRPCDuration prometheus.Observer
}
@@ -464,7 +462,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G

// Get updates for a service.
func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, agent *consul.Agent) {
level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))

t0 := time.Now()
aggregatedStatus, serviceChecks, err := agent.AgentHealthServiceByName(srv.name)
@@ -480,26 +478,26 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
}

if err != nil {
level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
srv.rpcFailuresCount.Inc()
time.Sleep(retryInterval)
return
}

self, err := agent.Self()
if err != nil {
level.Error(srv.logger).Log("msg", "failed to get agent info from agent api", "err", err)
srv.logger.Error("failed to get agent info from agent api", "err", err)
return
}
var member = consul.AgentMember{}
memberBytes, err := json.Marshal(self["Member"])
if err != nil {
level.Error(srv.logger).Log("msg", "failed to get member information from agent", "err", err)
srv.logger.Error("failed to get member information from agent", "err", err)
return
}
err = json.Unmarshal(memberBytes, &member)
if err != nil {
level.Error(srv.logger).Log("msg", "failed to unmarshal member information from agent", "err", err)
srv.logger.Error("failed to unmarshal member information from agent", "err", err)
return
}

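The consulagent discovery now takes a `*slog.Logger` instead of a `go-kit/log` logger, matching what recent Prometheus service-discovery code expects, and the nop-logger fallback becomes an explicit stderr handler. The leveled-call pattern changes roughly as sketched below (illustrative, not code from this commit):

```go
package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// go-kit style (before):
	//   level.Error(logger).Log("msg", "Error refreshing service list", "err", err)
	//
	// slog style (after): the message is positional, followed by key/value pairs.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
	err := errors.New("connection refused")
	logger.Error("Error refreshing service list", "err", err)
	logger.Debug("Watching services", "tags", "prod,web") // suppressed: below the Info level
}
```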
5 changes: 3 additions & 2 deletions clients/pkg/promtail/discovery/consulagent/consul_test.go
@@ -15,13 +15,14 @@ package consulagent

import (
"context"
"log/slog"
"net/http"
"net/http/httptest"
"net/url"
"os"
"testing"
"time"

"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
@@ -364,7 +365,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
}

func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
logger := log.NewNopLogger()
logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
d, err := NewDiscovery(config, logger, metrics)
require.NoError(t, err)
2 changes: 1 addition & 1 deletion clients/pkg/promtail/promtail_test.go
@@ -522,7 +522,7 @@ func getPromMetrics(t *testing.T, httpListenAddr net.Addr) ([]byte, string) {
func parsePromMetrics(t *testing.T, bytes []byte, contentType string, metricName string, label string) map[string]float64 {
rb := map[string]float64{}

pr, err := textparse.New(bytes, contentType, false, nil)
pr, err := textparse.New(bytes, contentType, "", false, false, nil)
require.NoError(t, err)
for {
et, err := pr.Next()
3 changes: 2 additions & 1 deletion clients/pkg/promtail/targets/docker/targetmanager.go
@@ -16,6 +16,7 @@ import (
"github.com/grafana/loki/v3/clients/pkg/promtail/targets/target"

"github.com/grafana/loki/v3/pkg/util"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)

const (
@@ -60,7 +61,7 @@ func NewTargetManager(
positions: positions,
manager: discovery.NewManager(
ctx,
log.With(logger, "component", "docker_discovery"),
util_log.SlogFromGoKit(log.With(logger, "component", "docker_discovery")),
noopRegistry,
noopSdMetrics,
),
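`discovery.NewManager` now expects an `*slog.Logger`, so Promtail's existing `go-kit/log` loggers are wrapped with `util_log.SlogFromGoKit` before being handed to the discovery manager. The actual implementation lives in `pkg/util/log` and is not shown in this diff; the sketch below only illustrates the general shape of such an adapter, using a hypothetical `goKitHandler` type:

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-kit/log"
)

// goKitHandler is a hypothetical slog.Handler that forwards records to a
// go-kit logger; Loki's real SlogFromGoKit helper may differ in detail.
type goKitHandler struct{ logger log.Logger }

func (h goKitHandler) Enabled(context.Context, slog.Level) bool { return true }

func (h goKitHandler) Handle(_ context.Context, r slog.Record) error {
	kvs := []interface{}{"level", r.Level.String(), "msg", r.Message}
	r.Attrs(func(a slog.Attr) bool {
		kvs = append(kvs, a.Key, a.Value.Any())
		return true
	})
	return h.logger.Log(kvs...)
}

// Simplified: attributes and groups added via With* are ignored here.
func (h goKitHandler) WithAttrs([]slog.Attr) slog.Handler { return h }
func (h goKitHandler) WithGroup(string) slog.Handler      { return h }

func main() {
	gk := log.NewLogfmtLogger(os.Stderr)
	slogger := slog.New(goKitHandler{logger: gk})
	slogger.Info("discovery manager started", "component", "docker_discovery")
}
```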
3 changes: 2 additions & 1 deletion clients/pkg/promtail/targets/file/filetargetmanager.go
@@ -27,6 +27,7 @@ import (
"github.com/grafana/loki/v3/clients/pkg/promtail/targets/target"

"github.com/grafana/loki/v3/pkg/util"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)

const (
@@ -84,7 +85,7 @@ func NewFileTargetManager(
syncers: map[string]*targetSyncer{},
manager: discovery.NewManager(
ctx,
log.With(logger, "component", "discovery"),
util_log.SlogFromGoKit(log.With(logger, "component", "discovery")),
noopRegistry,
noopSdMetrics,
),
2 changes: 1 addition & 1 deletion clients/pkg/promtail/targets/heroku/target.go
@@ -68,7 +68,7 @@ func (h *Target) run() error {
// To prevent metric collisions because all metrics are going to be registered in the global Prometheus registry.

tentativeServerMetricNamespace := "promtail_heroku_drain_target_" + h.jobName
if !model.IsValidMetricName(model.LabelValue(tentativeServerMetricNamespace)) {
if !model.LabelName(tentativeServerMetricNamespace).IsValidLegacy() {
return fmt.Errorf("invalid prometheus-compatible job name: %s", h.jobName)
}
h.config.Server.MetricsNamespace = tentativeServerMetricNamespace
3 changes: 2 additions & 1 deletion clients/pkg/promtail/wal/wal.go
@@ -10,6 +10,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"

"github.com/grafana/loki/v3/pkg/ingester/wal"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)

var (
@@ -37,7 +38,7 @@ type wrapper struct {
func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (WAL, error) {
// TODO: We should fine-tune the WAL instantiated here to allow some buffering of written entries, but not written to disk
// yet. This will attest for the lack of buffering in the channel Writer exposes.
tsdbWAL, err := wlog.NewSize(log, registerer, cfg.Dir, wlog.DefaultSegmentSize, wlog.CompressionNone)
tsdbWAL, err := wlog.NewSize(util_log.SlogFromGoKit(log), registerer, cfg.Dir, wlog.DefaultSegmentSize, wlog.CompressionNone)
if err != nil {
return nil, fmt.Errorf("failde to create tsdb WAL: %w", err)
}
3 changes: 2 additions & 1 deletion clients/pkg/promtail/wal/watcher.go
@@ -15,6 +15,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wlog"

"github.com/grafana/loki/v3/pkg/ingester/wal"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)

const (
@@ -160,7 +161,7 @@ func (w *Watcher) watch(segmentNum int) error {
}
defer segment.Close()

reader := wlog.NewLiveReader(w.logger, nil, segment)
reader := wlog.NewLiveReader(util_log.SlogFromGoKit(w.logger), nil, segment)

readTimer := newBackoffTimer(w.minReadFreq, w.maxReadFreq)

4 changes: 2 additions & 2 deletions docs/sources/get-started/labels/_index.md
@@ -30,7 +30,7 @@ A label is a key-value pair, for example all of the following are labels:
A set of log messages which shares all the labels above would be called a log stream. When Loki performs searches, it first looks for all messages in your chosen stream, and then iterates through the logs in the stream to perform your query.

Labeling will affect your queries, which in turn will affect your dashboards.
It’s worth spending the time to think about your labeling strategy before you begin ingesting logs to Loki.
It’s worth spending the time to think about your labeling strategy before you begin ingesting logs to Loki.

## Default labels for all users

@@ -40,7 +40,7 @@ Loki does not parse or process your log messages on ingestion. However, dependin

Loki automatically tries to populate a default `service_name` label while ingesting logs. The service name label is used to find and explore logs in the following Grafana and Grafana Cloud features:

- Explore Logs
- Logs Drilldown
- Grafana Cloud Application Observability

{{< admonition type="note" >}}
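The docs text above describes labels and log streams; in LogQL terms, the label matchers at the front of a query select the stream(s), and the rest of the query then iterates over their log lines. An illustrative query (the label names here are hypothetical):

```logql
{namespace="prod", app="api"} |= "status=500"
```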
2 changes: 1 addition & 1 deletion docs/sources/get-started/labels/structured-metadata.md
@@ -23,7 +23,7 @@ You should only use structured metadata in the following situations:

- If you are ingesting data in OpenTelemetry format, using Grafana Alloy or an OpenTelemetry Collector. Structured metadata was designed to support native ingestion of OpenTelemetry data.
- If you have high cardinality metadata that should not be used as a label and does not exist in the log line. Some examples might include `process_id` or `thread_id` or Kubernetes pod names.
- If you are using [Explore Logs](https://grafana.com/docs/grafana-cloud/visualizations/simplified-exploration/logs/) to visualize and explore your Loki logs. You must set `discover_log_levels` and `allow_structured_metadata` to `true` in your Loki configuration.
- If you are using [Logs Drilldown](https://grafana.com/docs/grafana-cloud/visualizations/simplified-exploration/logs/) to visualize and explore your Loki logs. You must set `discover_log_levels` and `allow_structured_metadata` to `true` in your Loki configuration.
- If you are a large-scale customer, who is ingesting more than 75TB of logs a month and are using [Bloom filters](https://grafana.com/docs/loki/<LOKI_VERSION>/operations/bloom-filters/) (Experimental), starting in [Loki 3.3](https://grafana.com/docs/loki/<LOKI_VERSION>/release-notes/v3-3/) Bloom filters now utilize structured metadata.

## Enable or disable structured metadata
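The Logs Drilldown requirement called out above maps to two per-tenant settings. A minimal sketch, assuming both live under `limits_config` as in recent Loki releases:

```yaml
limits_config:
  # Required for Logs Drilldown, per the docs change above.
  allow_structured_metadata: true
  discover_log_levels: true
```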
12 changes: 6 additions & 6 deletions docs/sources/send-data/k8s-monitoring-helm/_index.md
@@ -168,12 +168,12 @@ As before, the command also includes a `values` file that specifies the configur

## Deploy the Kubernetes Monitoring Helm chart

The Kubernetes Monitoring Helm chart is used for gathering, scraping, and forwarding Kubernetes telemetry data to a Grafana stack. This includes the ability to collect metrics, logs, traces, and continuous profiling data. The scope of this tutorial is to deploy the Kubernetes Monitoring Helm chart to collect pod logs and Kubernetes events.
The Kubernetes Monitoring Helm chart is used for gathering, scraping, and forwarding Kubernetes telemetry data to a Grafana stack. This includes the ability to collect metrics, logs, traces, and continuous profiling data. The scope of this tutorial is to deploy the Kubernetes Monitoring Helm chart to collect pod logs and Kubernetes events.

To deploy the Kubernetes Monitoring Helm chart run the following command:

```bash
helm install --values ./k8s-monitoring-values.yml k8s grafana/k8s-monitoring -n meta
helm install --values ./k8s-monitoring-values.yml k8s grafana/k8s-monitoring -n meta
```
Within the configuration file `k8s-monitoring-values.yml` we have defined the following:

@@ -260,14 +260,14 @@ kubectl --namespace meta port-forward $POD_NAME 3000 --address 0.0.0.0
This will make your terminal unusable until you stop the port-forwarding process. To stop the process, press `Ctrl + C`.
{{< /admonition >}}

This command will port-forward the Grafana service to your local machine on port `3000`.
This command will port-forward the Grafana service to your local machine on port `3000`.

You can now access Grafana by navigating to [http://localhost:3000](http://localhost:3000) in your browser. The default credentials are `admin` and `adminadminadmin`.
You can now access Grafana by navigating to [http://localhost:3000](http://localhost:3000) in your browser. The default credentials are `admin` and `adminadminadmin`.

One of the first places you should visit is Explore Logs which lets you automatically visualize and explore your logs without having to write queries:
One of the first places you should visit is Logs Drilldown which lets you automatically visualize and explore your logs without having to write queries:
[http://localhost:3000/a/grafana-lokiexplore-app](http://localhost:3000/a/grafana-lokiexplore-app)

{{< figure max-width="100%" src="/media/docs/loki/k8s-logs-explore-logs.png" caption="Explore Logs view of K8s logs" alt="Explore Logs view of K8s logs" >}}
{{< figure max-width="100%" src="/media/docs/loki/k8s-logs-explore-logs.png" caption="Logs Drilldown view of K8s logs" alt="Logs Drilldown view of K8s logs" >}}

<!-- INTERACTIVE page step6.md END -->

15 changes: 15 additions & 0 deletions docs/sources/shared/configuration.md
@@ -847,6 +847,11 @@ dataobj:
# CLI flag: -dataobj-consumer.sha-prefix-size
[shaprefixsize: <int> | default = 2]

# The maximum amount of time to wait in seconds before flushing an object
# that is no longer receiving new writes
# CLI flag: -dataobj-consumer.idle-flush-timeout
[idle_flush_timeout: <duration> | default = 1h]

querier:
# Enable the dataobj querier.
# CLI flag: -dataobj-querier-enabled
@@ -2414,6 +2419,10 @@ The `gcs_storage_config` block configures the connection to Google Cloud Storage
# CLI flag: -<prefix>.gcs.bucketname
[bucket_name: <string> | default = ""]
# Custom GCS endpoint URL.
# CLI flag: -<prefix>.gcs.endpoint
[endpoint: <string> | default = ""]
# Service account key content in JSON format, refer to
# https://cloud.google.com/iam/docs/creating-managing-service-account-keys for
# creation.
@@ -4047,6 +4056,12 @@ engine:
# CLI flag: -querier.engine.max-count-min-sketch-heap-size
[max_count_min_sketch_heap_size: <int> | default = 10000]
# Enable experimental support for running multiple query variants over the
# same underlying data. For example, running both a rate() and
# count_over_time() query over the same range selector.
# CLI flag: -querier.engine.enable-multi-variant-queries
[enable_multi_variant_queries: <boolean> | default = false]
# The maximum number of queries that can be simultaneously processed by the
# querier.
# CLI flag: -querier.max-concurrent
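The `configuration.md` additions document three new options: an idle-flush timeout for the dataobj consumer, a custom GCS endpoint, and an experimental multi-variant query flag. A hedged sketch of how they might appear in a config file; the nesting of the dataobj consumer block is abbreviated in the excerpt above, so treat the exact placement as illustrative (bucket and endpoint values are hypothetical):

```yaml
dataobj:
  consumer:
    # -dataobj-consumer.idle-flush-timeout: flush objects that stop receiving writes.
    idle_flush_timeout: 1h

storage_config:
  gcs:
    bucket_name: my-loki-bucket      # hypothetical value
    endpoint: http://fake-gcs:4443   # -<prefix>.gcs.endpoint, e.g. a GCS emulator

querier:
  engine:
    # -querier.engine.enable-multi-variant-queries (experimental)
    enable_multi_variant_queries: false
```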