Starting ELK (Elasticsearch, Logstash, Kibana), Filebeat, and nginx with Docker Compose

Introduction

Last time, I ran nginx on Docker and saved its access log to Elasticsearch with Logstash.
This time, I have switched to shipping the log to Logstash with Filebeat.

The source code has been uploaded to GitHub.

Environment

    • docker-compose

    • Elasticsearch

    • Kibana

    • Logstash

    • Filebeat

    • nginx

Directory structure

└── es_logstash
    ├── es_d
    │   ├── docker-compose.yml
    │   ├── Dockerfile
    │   └── config
    │       └── elasticsearch.yml
    ├── kibana_d
    │   ├── docker-compose.yml
    │   ├── Dockerfile
    │   └── config
    │       └── kibana.yml
    ├── logstash_d
    │   ├── docker-compose.yml
    │   ├── Dockerfile
    │   └── config
    │       └── logstash.conf
    ├── beats_d
    │   ├── docker-compose.yml
    │   ├── Dockerfile
    │   └── config
    │       └── filebeat.conf
    └── nginx_d
        └── docker-compose.yml
es_d/docker-compose.yml

version: '2'
services:
  elasticsearch:
    mem_limit: 1024m
    build: .
    container_name: es_c_el
    image: es_i_el:1.0.5
    volumes:
      - ../data/es:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    environment:
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
es_d/Dockerfile

FROM docker.elastic.co/elasticsearch/elasticsearch:6.2.3

COPY ./config/elasticsearch.yml /usr/share/elasticsearch/config/elasticsearch.yml

# Install the kuromoji plugin
RUN elasticsearch-plugin install analysis-kuromoji

# RUN elasticsearch-plugin remove x-pack
es_d/config/elasticsearch.yml

http.host: 0.0.0.0

cluster.name: "docker-cluster"
discovery.type: single-node

### x-pack functions
xpack.security.enabled: false
# Free use of monitoring is limited to one cluster
xpack.monitoring.enabled: true
xpack.graph.enabled: false
xpack.watcher.enabled: false
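
Once this container is up, a quick sanity check (assuming the 9200 port mapping above) is to hit the node and list the installed plugins, which should include analysis-kuromoji:

$ curl http://localhost:9200
$ curl http://localhost:9200/_cat/plugins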
kibana_d/docker-compose.yml

version: '2'
services:
  kibana:
    mem_limit: 512m
    build: .
    container_name: kibana_c_el
    image: kibana_i_el:1.0.4
    external_links:
      - elasticsearch
    ports:
      - 5601:5601
    networks:
      - default
      - es1_default

networks:
  es1_default:
    external:
      name: es_d_default
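
The networks section is what connects this separate Compose project to Elasticsearch: Compose names a project's default network <project>_default, so the es_d project creates es_d_default, and Kibana joins it here as an external network. You can confirm the network exists and see which containers are attached:

$ docker network ls
$ docker network inspect es_d_default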
kibana_d/Dockerfile

FROM docker.elastic.co/kibana/kibana:6.2.3

COPY ./config/kibana.yml /usr/share/kibana/config/kibana.yml
# RUN kibana-plugin remove x-pack
kibana_d/config/kibana.yml

server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
elasticsearch.username: elastic
elasticsearch.password: changeme
# xpack.monitoring.ui.container.elasticsearch.enabled: true
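
To check that Kibana came up and can reach Elasticsearch, you can query its status API (a sketch, assuming the 5601 port mapping above):

$ curl http://localhost:5601/api/status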
logstash_d/docker-compose.yml

version: '2'
services:
  logstash:
    mem_limit: 512m
    build: .
    container_name: logstash_c_el
    image: logstash_i_el:1.0.21
    external_links:
      - elasticsearch
    ports:
      - 5044:5044
    networks:
      - default
      - es1_default

networks:
  es1_default:
    external:
      name: es_d_default
logstash_d/Dockerfile

FROM docker.elastic.co/logstash/logstash:6.2.3

ADD ./config/logstash.conf /usr/share/logstash/pipeline/logstash.conf
logstash_d/config/logstash.conf

input {
  beats {
    port => 5044
  }
}

filter {
  grok {
    match => { "message" => ["%{IPORHOST:[nginx][access][remote_ip]} - %{DATA:[nginx][access][user_name]} \[%{HTTPDATE:[nginx][access][time]}\] \"%{WORD:[nginx][access][method]} %{DATA:[nginx][access][url]} HTTP/%{NUMBER:[nginx][access][http_version]}\" %{NUMBER:[nginx][access][response_code]} %{NUMBER:[nginx][access][body_sent][bytes]} \"%{DATA:[nginx][access][referrer]}\" \"%{DATA:[nginx][access][agent]}\""] }
    remove_field => "message"
  }
  mutate {
    add_field => { "read_timestamp" => "%{@timestamp}" }
  }
  date {
    match => [ "[nginx][access][time]", "dd/MMM/YYYY:H:m:s Z" ]
    remove_field => "[nginx][access][time]"
  }
  useragent {
    source => "[nginx][access][agent]"
    target => "[nginx][access][user_agent]"
    remove_field => "[nginx][access][agent]"
  }
  geoip {
    source => "[nginx][access][remote_ip]"
    target => "[nginx][access][geoip]"
  }
}

output {
  elasticsearch {
    hosts => [ 'elasticsearch' ]
    index => "access_log1"
  }
}
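
For reference, here is roughly what this pipeline does to a single combined-format access-log entry. Given the following illustrative line (not taken from real output):

172.18.0.1 - - [28/Mar/2018:10:20:00 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.52.1"

the grok filter yields nested fields such as [nginx][access][remote_ip] = 172.18.0.1, [nginx][access][method] = GET, [nginx][access][url] = /, [nginx][access][response_code] = 200, and [nginx][access][body_sent][bytes] = 612. The date filter then promotes the bracketed timestamp to @timestamp, the useragent filter expands the agent string, and the geoip filter attempts an IP lookup (which will fail for private addresses like this one) before the event is indexed into access_log1.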
beats_d/docker-compose.yml

version: '2'
services:
  beats:
    mem_limit: 512m
    build: .
    container_name: beats_c_el
    image: beats_i_el:1.0.1
    volumes:
      - ../data/nginx:/var/log/nginx/
    external_links:
      - logstash
    networks:
      - default
      - logstash1_default

networks:
  logstash1_default:
    external:
      name: logstash_d_default
beats_d/config/filebeat.conf

filebeat:
  prospectors:
    - paths:
        - /var/log/nginx/access.log
      input_type: log

output:
  logstash:
    hosts: ["logstash:5044"]
nginx_d/docker-compose.yml

version: '2'
services:
  web:
    mem_limit: 512m
    image: nginx:1.10
    ports:
      - "80:80"
    volumes:
      - ../data/nginx:/var/log/nginx
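
Note that nginx and Filebeat never talk to each other directly: the nginx container writes its logs into the shared host directory ../data/nginx, and the Filebeat container mounts that same directory as /var/log/nginx/ and tails access.log from there. You can watch the file being written from the host:

$ tail -f ../data/nginx/access.log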

Verifying that it works

Starting the containers

Start the containers in this order: Elasticsearch, Logstash, Kibana, Filebeat, and then nginx.

$ docker-compose up -d
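
Since each service lives in its own directory with its own docker-compose.yml, the command is run once per directory. A minimal sketch of the full sequence, using the directory names from the tree above:

$ cd es_d && docker-compose up -d && cd ..
$ cd logstash_d && docker-compose up -d && cd ..
$ cd kibana_d && docker-compose up -d && cd ..
$ cd beats_d && docker-compose up -d && cd ..
$ cd nginx_d && docker-compose up -d && cd ..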

Accessing nginx

$ curl http://localhost

access.log is updated, and the new entries are forwarded through Filebeat and Logstash and stored in Elasticsearch.

Elasticsearch

$ curl -XGET 'http://localhost:9200/_cat/count/access_log1'

The number of requests made to nginx is returned.
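
For reference, the _cat/count API prints an epoch, a time, and a document count, so after a few requests the output looks something like this (the values below are illustrative, not real output):

1522230000 10:20:00 3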

Kibana

If you open Kibana and run GET /access_log1/_search?pretty=true (for example from the Dev Tools console), you can confirm the indexed documents.

Finally

Elasticsearch is a deep subject.

GitHub
