I have Loki running in a container together with Grafana and Promtail. My docker-compose.yaml:
version: "3"

networks:
  loki:
    driver: bridge
    ipam:
      config:
        - subnet: 192.180.2.0/24

services:
  loki:
    image: grafana/loki:2.8.6
    container_name: loki
    volumes:
      - ./loki-config.yaml:/etc/loki/loki-config.yaml
      - /etc/localtime:/etc/localtime:ro
      - /data:/tmp
    command: -config.file=/etc/loki/loki-config.yaml
    ports:
      - "3100:3100"
    networks:
      - loki
    logging:
      driver: "json-file"
      options:
        max-size: 1m
        max-file: "3"
        tag: "common.logging.loki.docker"

  promtail:
    image: grafana/promtail:2.4.2
    container_name: promtail
    restart: always
    volumes:
      - /var/lib/docker/:/var/lib/docker:ro
      - /var/log:/var/log
      - ./promtail-local-config.yaml:/etc/promtail/config.yaml:ro
      - /etc/localtime:/etc/localtime:ro
    command: -config.file=/etc/promtail/config.yaml
    networks:
      - loki
    ports:
      - "1514:1514/tcp"
      - "9080:9080/tcp"
    logging:
      driver: "json-file"
      options:
        max-size: 1m
        max-file: "3"
        tag: "common.logging.promtail.docker"

  grafana:
    image: grafana/grafana:10.2.0
    container_name: grafana
    restart: always
    environment:
      - GF_DATAPROXY_TIMEOUT=1200
      - GF_DATAPROXY_KEEP_ALIVE_SECONDS=300
      - GF_LOG_LEVEL=debug
    ports:
      - "3000:3000"
    networks:
      - loki
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /opt/grafana_data:/var/lib/grafana
      - /opt/grafana_etc:/etc/grafana
    logging:
      driver: "json-file"
      options:
        max-size: 1m
        max-file: "3"
        tag: "common.logging.grafana.docker"
My loki-config.yaml:

auth_enabled: false

server:
  http_listen_port: 3100

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v12
      index:
        prefix: index_
        period: 24h

common:
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

limits_config:
  split_queries_by_interval: 15m
  max_entries_limit_per_query: 15000

query_range:
  # make queries more cache-able by aligning them with their step intervals
  align_queries_with_step: true
  max_retries: 5
  parallelise_shardable_queries: true
  cache_results: true

ruler:
  storage:
    type: local
    local:
      directory: /tmp/rules
I'm able to query logs. A first query over the last 5 minutes returns a nice response: roughly 4200 log lines processed in about 0.2 s according to the Query inspector. I also get responses for 6- and 7-minute ranges, and sometimes for 11 or 12 minutes when there weren't many logs coming in yet. But beyond that, the query runs for a while and then fails with:

Error: net/http: request canceled (Client.Timeout exceeded while awaiting headers)

What could be the problem?
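Since GF_DATAPROXY_TIMEOUT is already raised to 1200 on the Grafana side, I'm wondering whether a Loki-side limit is cutting the query off instead. For reference, these are the timeout-related options from the Loki documentation that my loki-config.yaml does not set; the values below are only a sketch of what I would try, not a confirmed fix:

server:
  http_listen_port: 3100
  # read/write timeouts of Loki's own HTTP server
  http_server_read_timeout: 5m
  http_server_write_timeout: 5m

limits_config:
  split_queries_by_interval: 15m
  max_entries_limit_per_query: 15000
  # per-query timeout enforced by Loki itself
  query_timeout: 5m

I'm also aware that the Loki data source in Grafana has its own HTTP timeout field, but I haven't been able to confirm which of these limits is actually producing the error above.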