Revisiting ELK: the all-new ELK 5.2.0
2017/2/15
I. Environment
1. RPM
1) Collect the rpm packages
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.2.0.rpm
wget https://artifacts.elastic.co/downloads/kibana/kibana-5.2.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/logstash/logstash-5.2.0.rpm
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.0-x86_64.rpm
2) Cache the rpm packages in a local yum repository
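If you do not already have a local repository, one simple approach is to drop the rpms into a web-served directory and run createrepo over it. The host name and paths below are illustrative assumptions, not part of the original setup:

# Copy the downloaded rpms into a directory served over HTTP and build the repo metadata
[root@vm220 ~]# mkdir -p /var/www/html/elk-5.2.0 && cp ./*.rpm /var/www/html/elk-5.2.0/
[root@vm220 ~]# createrepo /var/www/html/elk-5.2.0
# On every node that needs the packages, add a repo file pointing at it (URL is hypothetical)
[root@vm220 ~]# cat /etc/yum.repos.d/elk-local.repo
[elk-local]
name=Local ELK 5.2.0 packages
baseurl=http://yum.example.local/elk-5.2.0
enabled=1
gpgcheck=0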
2. Installation
[Server side]
1) ELK
[root@vm220 ~]# yum install elasticsearch kibana logstash -y
2) JDK
(omitted)
[Client side]
1) filebeat
[root@vm49 ~]# yum install filebeat -y
3. Prerequisites
Assume we need to collect the access and error logs for the following two domains:
www.test.com
www.work.com
The access log format is as follows:
log_format online '$remote_addr [$time_local] "$request" '
                  '"$http_content_type" "$request_body" "$http_referer" '
                  '$status $request_time $body_bytes_sent';
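For orientation, a single request logged in this format might look roughly like the line below; every value is invented purely for illustration:

1.2.3.4 [15/Feb/2017:10:01:02 +0800] "GET /index.html HTTP/1.1" "-" "-" "http://www.test.com/" 200 0.012 1234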
The error log uses the default level (error).
Additional requirement: use a separate index for each domain.
II. ELK server configuration
1. elasticsearch
1) Configuration file
[root@vm220 ~]# mkdir -p /data/elasticsearch
[root@vm220 ~]# chown elasticsearch:elasticsearch /data/elasticsearch
[root@vm220 ~]# cp -a /etc/elasticsearch/elasticsearch.yml{,.bak}
Adjust the configuration file:
[If ES is a single node]
[root@vm220 ~]# grep ^[^#] /etc/elasticsearch/elasticsearch.yml
cluster.name: es-cluster-test
node.name: node-vm220
path.data: /data/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.system_call_filter: false
network.host: 10.50.200.220
[If elasticsearch runs as a cluster]
[root@vm220 ~]# grep ^[^#] /etc/elasticsearch/elasticsearch.yml
cluster.name: es-cluster-test
node.name: node-vm220
path.data: /data/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.system_call_filter: false
network.host: 10.50.200.220
discovery.zen.ping.unicast.hosts: ["10.50.200.218", "10.50.200.219", "10.50.200.220"]
discovery.zen.minimum_master_nodes: 2
The other nodes are configured similarly. Note that with three master-eligible nodes the recommended quorum is (3 / 2) + 1 = 2, which is why discovery.zen.minimum_master_nodes is set to 2 rather than 3.
[Note] bootstrap.system_call_filter: false
Because of a kernel limitation, the syscall filter cannot be installed on CentOS 6; the warning looks like this:
[2017-02-13T14:14:00,689][WARN ][o.e.b.JNANatives ] unable to install syscall filter:
java.lang.UnsupportedOperationException: seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in
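A quick way to confirm you are hitting this limitation is to check the running kernel; CentOS 6 ships the 2.6.32 series, below the 3.5 that seccomp filtering requires:

# Kernel version of the host running elasticsearch
[root@vm220 ~]# uname -r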
2) Start the service
[root@vm220 ~]# service elasticsearch start
[root@vm220 ~]# chkconfig elasticsearch on
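After starting, it is worth confirming that elasticsearch answers on the configured address and that the cluster reports a sane health status (address as configured above):

# Basic node/cluster info
[root@vm220 ~]# curl 'http://10.50.200.220:9200/?pretty'
# One-line cluster health summary
[root@vm220 ~]# curl 'http://10.50.200.220:9200/_cat/health?v'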
2. kibana
1) Configuration file
[root@vm220 ~]# grep ^[^#] /etc/kibana/kibana.yml
server.host: "10.50.200.220"
server.name: "es-cluster-test-kibana"
elasticsearch.url: "http://10.50.200.220:9200"
2) Start the service
[root@vm220 ~]# service kibana restart
[root@vm220 ~]# chkconfig kibana on
3) Access
http://10.50.200.220:5601/app/kibana
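If the page does not load, a quick check from the shell against Kibana's status endpoint (available in the 5.x series) helps narrow down whether the service itself is up:

# Should return a JSON status document when kibana is healthy
[root@vm220 ~]# curl 'http://10.50.200.220:5601/api/status'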
3. logstash
1) Configure custom patterns
[root@vm220 ~]# mkdir -p /etc/logstash/patterns.d
[root@vm220 ~]# cat /etc/logstash/patterns.d/extra_patterns
NGINXACCESS %{IPORHOST:clientip} \[%{HTTPDATE:timestamp}\] "%{WORD:verb} %{URIPATHPARAM:request} HTTP/%{NUMBER:httpversion}" (?:%{QS:content_type}|-) (?:%{QS:request_body}|-) (?:"(?:%{URI:referrer}|-)"|%{QS:referrer}) %{NUMBER:response} %{BASE16FLOAT:request_time} (?:%{NUMBER:bytes}|-)
NGINXERROR_DATESTAMP %{YEAR}/%{MONTHNUM}/%{MONTHDAY} %{TIME}
NGINXERROR_PID (?:[0-9]+#[0-9]+\:)
NGINXERROR_TID (?:\*[0-9]+)
NGINXERROR %{NGINXERROR_DATESTAMP:timestamp} \[%{LOGLEVEL:loglevel}\] %{NGINXERROR_PID:pid} %{NGINXERROR_TID:tid} %{GREEDYDATA:errormsg}, client: %{IPORHOST:clientip}, server: %{HOSTNAME:server}, request: %{QS:request}(?:, upstream: %{QS:upstream})?, host: \"%{HOSTNAME:hostname}\"(?:, referrer: (?:"(?:%{URI:referrer}|-)"|%{QS:referrer}))?
[root@vm220 ~]# grep ^[^#] /etc/logstash/logstash.yml
path.data: /var/lib/logstash
path.config: /etc/logstash/conf.d
path.logs: /var/log/logstash
[Note] The GeoLite database has been updated and its format has changed; download the latest GeoLite2 version:
cd /etc/logstash/ && mkdir geoip && cd geoip
wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz
gunzip GeoLite2-City.mmdb.gz
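Before wiring the custom patterns into the pipeline, they can be smoke-tested with a throwaway logstash run that reads from stdin and prints the parsed event. This is only a sketch (rpm install path, pattern directory as above; paste a real access-log line when prompted):

[root@vm220 ~]# /usr/share/logstash/bin/logstash -e '
input { stdin {} }
filter {
  grok {
    patterns_dir => ["/etc/logstash/patterns.d"]
    match => { "message" => "%{NGINXACCESS}" }
  }
}
output { stdout { codec => rubydebug } }'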
2) Adjust the pipeline configuration
[root@vm220 logstash]# cat conf.d/filebeat.conf
input {
    beats {
        port => "5044"
    }
}
filter {
    if [type] =~ "NginxAccess-" {
        grok {
            patterns_dir => ["/etc/logstash/patterns.d"]
            match => {
                "message" => "%{NGINXACCESS}"
            }
        }
        date {
            match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ]
            remove_field => [ "timestamp" ]
        }
        geoip {
            source => "clientip"
            target => "geoip"
            database => "/etc/logstash/geoip/GeoLite2-City.mmdb"
        }
    }
    if [type] =~ "NginxError-" {
        grok {
            patterns_dir => ["/etc/logstash/patterns.d"]
            match => {
                "message" => "%{NGINXERROR}"
            }
        }
        date {
            match => [ "timestamp", "YYYY/MM/dd HH:mm:ss" ]
            remove_field => [ "timestamp" ]
        }
        geoip {
            source => "clientip"
            target => "geoip"
            database => "/etc/logstash/geoip/GeoLite2-City.mmdb"
        }
    }
}
output {
    if [type] == "NginxAccess-www.test.com" {
        elasticsearch {
            hosts => "10.50.200.220:9200"
            manage_template => false
            index => "%{[@metadata][beat]}-nginxaccess-www.test.com-%{+YYYY.MM.dd}"
            document_type => "%{[@metadata][type]}"
        }
    }
    if [type] == "NginxAccess-www.work.com" {
        elasticsearch {
            hosts => "10.50.200.220:9200"
            manage_template => false
            index => "%{[@metadata][beat]}-nginxaccess-www.work.com-%{+YYYY.MM.dd}"
            document_type => "%{[@metadata][type]}"
        }
    }
    if [type] == "NginxError-www.test.com" {
        elasticsearch {
            hosts => "10.50.200.220:9200"
            manage_template => false
            index => "%{[@metadata][beat]}-nginxerror-www.test.com-%{+YYYY.MM.dd}"
            document_type => "%{[@metadata][type]}"
        }
    }
    if [type] == "NginxError-www.work.com" {
        elasticsearch {
            hosts => "10.50.200.220:9200"
            manage_template => false
            index => "%{[@metadata][beat]}-nginxerror-www.work.com-%{+YYYY.MM.dd}"
            document_type => "%{[@metadata][type]}"
        }
    }
}
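Before (re)starting the service it is cheap to let logstash validate the pipeline syntax (rpm install path; -t is the short form of --config.test_and_exit):

[root@vm220 ~]# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -t -f /etc/logstash/conf.d/filebeat.conf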
3) Start the service
On CentOS 6, the service is managed via upstart:
[root@vm220 ~]# initctl restart logstash
On CentOS 7, it is managed via systemd:
[root@vm220 ~]# systemctl start logstash.service
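Either way, confirm that the beats input is actually listening on port 5044:

[root@vm220 ~]# netstat -lntp | grep 5044
# or: ss -lntp | grep 5044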
4. filebeat
1) Configuration file
[root@vm49 ~]# cat /etc/filebeat/filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/nginx/access.www.test.com*.log
  document_type: NginxAccess-www.test.com
- input_type: log
  paths:
    - /var/log/nginx/access.www.work.com*.log
  document_type: NginxAccess-www.work.com
- input_type: log
  paths:
    - /var/log/nginx/error.www.test.com*.log
  document_type: NginxError-www.test.com
- input_type: log
  paths:
    - /var/log/nginx/error.www.work.com*.log
  document_type: NginxError-www.work.com

output.logstash:
  hosts: ["10.50.200.220:5044"]
2) Start the service
[root@vm49 ~]# service filebeat restart
[root@vm49 ~]# chkconfig filebeat on
3) Import the index template that ships with the filebeat package
Template path: /etc/filebeat/filebeat.template.json
You can adjust the default template as needed; compared with the stock template, the additions here are:
(omitted)
"dynamic_templates": [
{
"strings_as_keyword": {
"mapping": {
"ignore_above": 1024,
"type": "keyword"
},
"match_mapping_type": "string"
}
},
{
"all_as_doc_values": {
"mapping": {
"doc_values": true,
"ignore_above": 1024,
"index": "not_analyzed",
"type": "{dynamic_type}"
},
"match": "*"
}
}
],
(omitted)
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"bytes" : {
"type" : "long",
"index": "no"
},
"geoip" : {
"properties" : {
"location" : {
"type" : "geo_point",
"index": "no"
}
}
}
(omitted)
a. Import the template
[root@vm220 ~]# curl -XPUT 'http://10.50.200.220:9200/_template/filebeat?pretty' -d@/etc/filebeat/filebeat.template.json
b. View the template
[root@vm220 ~]# curl 'http://10.50.200.220:9200/_template/filebeat?pretty'
c. Clean up old indices (if this is a freshly configured deployment that has not yet generated any indices, there is nothing to clean up and this step can be skipped)
First, list the existing indices:
[root@vm220 ~]# curl '10.50.200.220:9200/_cat/indices?v'
Delete all indices matching filebeat-*:
[root@vm220 ~]# curl -XDELETE 'http://10.50.200.220:9200/filebeat-*?pretty'
List the indices again to confirm the result is as expected:
[root@vm220 ~]# curl '10.50.200.220:9200/_cat/indices?v'
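Once filebeat starts shipping logs, the per-domain indices defined in the logstash output should appear; they can be listed on their own with an index pattern:

# Only the indices created by this pipeline
[root@vm220 ~]# curl '10.50.200.220:9200/_cat/indices/filebeat-*?v'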
ZYXW. References
1. logstash
https://www.elastic.co/guide/en/logstash/5.2/running-logstash.html
2. geoip
https://github.com/elastic/logstash/issues/6167