枫叶飞翔 posted on 2019-1-29 06:08:18

Logstash + Elasticsearch + Kibana + Nginx

  Note: this setup uses Logstash + Elasticsearch + Kibana + Nginx for log analysis and display.
  1 Environment and versions
  1.1 Hosts
  1.2 Prerequisites
  2 Logstash configuration
  3 Starting Kibana and Elasticsearch
  3.1 elasticsearch
  3.2 kibana
  4 Nginx configuration
  

  
1 Environment and versions:

[*]  OS: CentOS 7.2.1511
[*]  Kernel: Linux Logs 3.10.0-123.9.3.el7.x86_64
[*]  JDK: 1.8.0_74
[*]  logstash-2.2.2
下载地址 (GitHub): https://github.com/elastic/logstash/tree/2.2
Purpose: collects and parses the incoming logs and stores them for later use (e.g. searching).

[*]  elasticsearch-2.2.0
Purpose: receives the parsed results from logstash and provides custom search over them.
Download (GitHub): https://github.com/elastic/elasticsearch/tree/2.2

[*]  kibana-4.4.1
Purpose: connects to elasticsearch-2.2.0 and provides the web interface.
Download (GitHub): https://github.com/elastic/kibana/tree/4.4

[*]  nginx: 1.9.12
  Forwards the Kibana port to port 80 and defines the domain name used for access.
1.1 Hosts:
  web1: 10.46.90.80 (internal), xx.xx.xx.xx (external)
  logs: 10.46.90.147 (internal), xx.xx.xx.xx (external)
1.2 Prerequisites:

[*]  NFS
  NFS is set up on logs, sharing /opt/logs; it is mounted at /home/wwwlogs on web1, and web1's PHP logs are written directly to /home/wwwlogs/*/ (see the sketch after this list).
  logstash, kibana and elasticsearch are all downloaded to /opt/

[*]  JDK already installed


[*]  nginx installed
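  A rough sketch of that NFS setup (the export options and the nfs-server service name are assumptions for CentOS 7; adjust them to the real environment). On logs (10.46.90.147), export /opt/logs to web1:
  # echo '/opt/logs 10.46.90.80(rw,sync,no_root_squash)' >> /etc/exports
  # systemctl enable nfs-server
  # systemctl start nfs-server
  # exportfs -ra
  On web1 (10.46.90.80), mount the share where PHP writes its logs:
  # mkdir -p /home/wwwlogs
  # mount -t nfs 10.46.90.147:/opt/logs /home/wwwlogs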
2 Logstash configuration
Logstash can be cloned with git and used directly; its configuration is the heart of the setup: it collects and parses the logs and stores them for later use (e.g. searching).


The grok filters in logstash's shipper.conf all use Ruby regular expressions; a Ruby regex tester is recommended: http://www.rubular.com/
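  The multiline anchor used below, ^\[\d{4}, simply matches lines that begin with "[" followed by a four-digit year. It can be sanity-checked with grep (the sample log line is made up for illustration):
  $ echo '[2016-05-25 10:00:00] ERROR something broke' | grep -E '^\[[0-9]{4}'
  [2016-05-25 10:00:00] ERROR something broke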


  Create the configuration file and edit it:
  # mkdir /opt/logstash/conf.d
  # vi /opt/logstash/conf.d/shipper.conf
  input {
    #stdin {
    #}
    #file {
    #  path => "/opt/logs/*/*_nginx.log"
    #  type => "access"
    #  codec => json
    #}
    file {
      path => "/opt/logs/php/admin.etcchebao.com/*.log"
      #path => "/opt/logs/php/admin.etcchebao.com/admin.log"
      type => "admin"
      codec => multiline {
        # Grok pattern names are valid! :)
        pattern => "^\[\d{4}"          # match lines starting with "[" followed by a 4-digit year
        #pattern => "^%{TIMESTAMP_ISO8601} "
        negate => true
        what => previous
      }
    }
    file {
      path => "/opt/logs/php/passport.etcchebao.com/*.log"
      #path => "/opt/logs/php/passport.etcchebao.com/passport.log"
      type => "passport"
      codec => multiline {
        # Grok pattern names are valid! :)
        pattern => "^\[\d{4}"          # match lines starting with "[" followed by a 4-digit year
        #pattern => "^%{TIMESTAMP_ISO8601} "
        negate => true
        what => previous
      }
    }
    file {
      path => "/opt/logs/php/push.etcchebao.com/*.log"
      #path => "/opt/logs/php/push.etcchebao.com/push.log"
      type => "push"
      codec => multiline {
        # Grok pattern names are valid! :)
        pattern => "^\[\d{4}"          # match lines starting with "[" followed by a 4-digit year
        #pattern => "^%{TIMESTAMP_ISO8601} "
        negate => true
        what => previous
      }
    }
    file {
      path => "/opt/logs/php/seller.etcchebao.com/*.log"
      #path => "/opt/logs/php/seller.etcchebao.com/seller.log"
      type => "seller"
      codec => multiline {
        # Grok pattern names are valid! :)
        pattern => "^\[\d{4}"          # match lines starting with "[" followed by a 4-digit year
        #pattern => "^%{TIMESTAMP_ISO8601} "
        negate => true
        what => previous
      }
    }
    file {
      path => "/opt/logs/php/m.etcchebao.com/*.log"
      #path => "/opt/logs/php/m.etcchebao.com/m.log"
      type => "m"
      codec => multiline {
        # Grok pattern names are valid! :)
        pattern => "^\[\d{4}"          # match lines starting with "[" followed by a 4-digit year
        #pattern => "^%{TIMESTAMP_ISO8601} "
        negate => true
        what => previous
      }
    }
    file {
      path => "/opt/logs/php/pay.etcchebao.com/*.log"
      #path => "/opt/logs/php/pay.etcchebao.com/pay.log"
      type => "pay"
      codec => multiline {
        # Grok pattern names are valid! :)
        pattern => "^\[\d{4}"          # match lines starting with "[" followed by a 4-digit year
        #pattern => "^%{TIMESTAMP_ISO8601} "
        negate => true
        what => previous
      }
    }
  }
  filter {
    # if [type] == "access" {
    #   grok {
    #     match => { "message" => "%{COMBINEDAPACHELOG}" }
    #   }
    #   date {
    #     match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss Z" ]
    #   }
    # }
    grok {
      match => [
        # 404 errors
        "message", "\:(?<status>\d{3}?)\]",
        # Error entries
        "message", "\[(?<status>Error?)\]",
        # 500 errors
        "message", "系统(?<status>\d{3}?)错误.*ERROR_NO:(?<error_no>.*?).*ERROR_STR:(?<error_str>.*?).*ERROR_LINE:(?<error_line>.*?).*ERROR_FILE:(?<error_file>.*?)\\n"
      ]
    }
  }
  # output to redis
  #output {
  #  redis {
  #    host => "127.0.0.1"
  #    port => "6379"
  #    type => "nginx-log"
  #    data_type => "list"
  #    key => "logstash"
  #  }
  #}
  # output to elasticsearch
  output {
    elasticsearch {
      #hosts => ["127.0.0.1:9300"]
      hosts => "127.0.0.1"
      index => "logstash-%{type}-%{+YYYY.MM.dd}"
      document_type => "%{type}"
      #workers => 1
      #flush_size => 20000
      #idle_flush_time => 10
      #template_overwrite => true
    }
    #if [status] != "404" {
    #  exec {
    #    #command => "echo '%{timestamp}:%{message}' | mail -s 'Log_error: HttpException error' yunwei@etcchebao.com"
    #    command => "echo '%{timestamp}:%{message}' | mail -s 'Log_error: HttpException' yunwei@etcchebao.com"
    #  }
    #}
  }
  output {
    if [status] != "404" {
      exec {
        #command => "echo '%{timestamp}:%{message}' | mail -s 'Log_error: HttpException error' yunwei@etcchebao.com"
        command => "echo '%{timestamp}:%{message}' | mail -s 'Log_error: HttpException' yunwei@etcchebao.com"
      }
    }
  }
  # output to screen - for testing
  output {
    stdout {
      codec => rubydebug
    }
  }
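  Before starting, the configuration syntax can be verified; logstash 2.x accepts a --configtest flag for this and should report that the configuration is OK if the file parses:
  # /opt/logstash/bin/logstash -f /opt/logstash/conf.d/shipper.conf --configtest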
  Start Logstash:
  # nohup /opt/logstash/bin/logstash -f /opt/logstash/conf.d/shipper.conf > /dev/null 2>&1 &
  Check that it started:
http://s3.运维网.com/wyfs02/M02/80/F2/wKioL1dFUqSQhZkAAAA8IIwjsK8524.png
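  The same kind of process check used for Elasticsearch and Kibana below also works for the shipper:
  # ps -elf | grep logstash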
3 Starting Kibana and Elasticsearch
  Neither Kibana nor Elasticsearch needs to be installed; just download them and they can be run directly. It is best to start logstash first. Note that by default they may not be started with the root account, so they are started here as the www user that also runs nginx.
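  One way to do that, assuming the www user already exists as the nginx worker user (the -s /bin/bash is only needed if its login shell is nologin):
  # chown -R www:www /opt/elasticsearch-2.2.0 /opt/kibana
  # su - www -s /bin/bash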
3.1 elasticsearch
  $ nohup /opt/elasticsearch-2.2.0/bin/elasticsearch > /dev/null 2>&1 &
  $ ps -elf | grep elasticsearch
  Check the process:
http://s1.运维网.com/wyfs02/M01/80/F3/wKiom1dFUdHTMXhIAAAn2EmdD5w413.png
  Check the port:
http://s1.运维网.com/wyfs02/M00/80/F3/wKiom1dFUeCRmUZGAAAblXGqyTQ260.png
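  Beyond the process and port checks, the Elasticsearch HTTP API (port 9200 by default) can be queried directly; once logs start flowing, the logstash-* indices should show up in the second call:
  $ curl -s http://127.0.0.1:9200
  $ curl -s 'http://127.0.0.1:9200/_cat/indices?v'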
3.2 kibana
  $ nohup /opt/kibana/bin/kibana > /dev/null 2>&1 &
  $ ps -elf | grep kibana
  Check the process:
  http://s4.运维网.com/wyfs02/M01/80/F2/wKioL1dFU37C7DfaAAAaiJRCrI4054.png
  Check the port:
http://s3.运维网.com/wyfs02/M02/80/F2/wKioL1dFU4jDTO4LAAAYglrijrI257.png
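  Kibana listens on port 5601 by default, which is also where the nginx proxy in the next section points; a quick check without the screenshots:
  $ ss -lntp | grep 5601
  $ curl -sI http://127.0.0.1:5601 | head -n 1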
  
4 Nginx configuration:
  $ vi /usr/local/nginx/conf/vhost/logs.etcchebao.cn.conf
  server {
      listen 80;
      server_name logs.etcchebao.cn;
      location / {
          auth_basic "secret";
          auth_basic_user_file /usr/local/nginx/logs_etcchebao.passwd;
          proxy_pass http://127.0.0.1:5601;
      }
  }
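  The auth_basic_user_file referenced above has to exist before nginx is reloaded. It can be created with htpasswd from httpd-tools (any equivalent tool works); the user name "admin" and the /usr/local/nginx/sbin/nginx path are only assumptions matching a source-built nginx:
  # yum install -y httpd-tools
  # htpasswd -c /usr/local/nginx/logs_etcchebao.passwd admin
  # /usr/local/nginx/sbin/nginx -t
  # /usr/local/nginx/sbin/nginx -s reload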
  



