ELK log analysis system:
####################################################################################################
1. Set the hostnames and configure local name resolution
Hostnames: linux-node1 and linux-node2
192.168.11.34 linux-node1.tangbo.com linux-node1
192.168.11.35 linux-node2.tangbo.com linux-node2
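A minimal sketch of applying this from the shell, run on each node with its own hostname (assumes a CentOS 6-style system, consistent with the service/sysconfig commands used later in this document):
hostname linux-node1
sed -i 's/^HOSTNAME=.*/HOSTNAME=linux-node1.tangbo.com/' /etc/sysconfig/network    # persist the hostname across reboots
cat >> /etc/hosts <<EOF
192.168.11.34 linux-node1.tangbo.com linux-node1
192.168.11.35 linux-node2.tangbo.com linux-node2
EOF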
####################################################################################################
2. Prepare the ELK environment (identical on both hosts)
yum install yum-downloadonly -y
####################################################################################################
3. Download and import the GPG key
[root@linux-node1 ~]# rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
[root@linux-node1 ~]# vim /etc/yum.repos.d/elasticsearch.repo
[elasticsearch-2.x]
name=Elasticsearch repository for 2.x packages
baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
Install elasticsearch:
yum install elasticsearch --downloadonly --downloaddir=/tmp/
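The command above only downloads the rpm into /tmp. A sketch of installing it afterwards (the exact file name depends on the elasticsearch version that was downloaded):
[root@linux-node1 tmp]# rpm -ivh /tmp/elasticsearch-*.rpm    # or: yum localinstall /tmp/elasticsearch-*.rpm -y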
####################################################################################################
4. Install logstash
[root@linux-node2 ~]# vim /etc/yum.repos.d/logstash.repo
[logstash-2.1]
name=Logstash repository for 2.1.x packages
baseurl=http://packages.elastic.co/logstash/2.1/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
[root@linux-node1 tmp]# yum install logstash --downloadonly --downloaddir=/tmp/
[root@linux-node1 tmp]# rpm -ivh logstash-2.1.3-1.noarch.rpm
Preparing... ########################################### [100%]
1:logstash ########################################### [100%]
[root@linux-node1 tmp]#
Install kibana:
[root@linux-node2 ~]# cd /usr/local/src
[root@linux-node2 src]# wget https://download.elastic.co/kibana/kibana/kibana-4.3.1-linux-x64.tar.gz
tar zxf kibana-4.3.1-linux-x64.tar.gz
[root@linux-node1 src]# mv kibana-4.3.1-linux-x64 /usr/local/
[root@linux-node2 src]# ln -s /usr/local/kibana-4.3.1-linux-x64/ /usr/local/kibana
####################################################################################################
5. Install Java (optionally install redis and nginx)
[root@linux-node2 ~]# rpm -ivh jdk-7u80-linux-x64.rpm
Preparing... ########################################### [100%]
1:jdk ########################################### [100%]
Unpacking JAR files...
rt.jar...
jsse.jar...
charsets.jar...
tools.jar...
localedata.jar...
jfxrt.jar...
[root@linux-node2 ~]#
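A quick sanity check that the JDK is installed and on the PATH (for the jdk-7u80 rpm the reported version should correspond to 1.7.0_80):
[root@linux-node2 ~]# java -version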
####################################################################################################
6. Manage elasticsearch on linux-node1
Edit the elasticsearch configuration file and set ownership on the data directory:
[root@linux-node1 src]# grep -n '^[a-Z]' /etc/elasticsearch/elasticsearch.yml
17:cluster.name: chuck-cluster          cluster name; nodes with the same cluster name join the same cluster
23:node.name: linux-node1               node name (the node's hostname)
33:path.data: /data/es-data             data directory
37:path.logs: /var/log/elasticsearch/   log directory
43:bootstrap.mlockall: true             lock the process memory so it is not swapped out
54:network.host: 0.0.0.0                bind address (0.0.0.0 allows access from any IP)
58:http.port: 9200                      HTTP port
[root@linux-node1 ~]# mkdir -p /data/es-data
[root@linux-node1 src]# chown elasticsearch.elasticsearch /data/es-data/
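Because bootstrap.mlockall is enabled, the service also needs permission to lock memory, otherwise a warning is logged at startup. A hedged sketch for the RPM/SysV install (the MAX_LOCKED_MEMORY setting is taken from the package's sysconfig file; verify it is honored by your version):
[root@linux-node1 ~]# echo 'MAX_LOCKED_MEMORY=unlimited' >> /etc/sysconfig/elasticsearch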
Start elasticsearch:
[root@linux-node1 tmp]# service elasticsearch start
Starting elasticsearch: [ OK ]
[root@linux-node1 tmp]#
[root@linux-node1 tmp]# netstat -lntup|grep 9200
tcp 0 0 :::9200 :::* LISTEN 1993/java
[root@linux-node1 tmp]#
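Optionally, query the cluster health over the same port (the _cluster/health API is part of elasticsearch); for a fresh single-node cluster with no indices the status should be green:
[root@linux-node1 tmp]# curl http://127.0.0.1:9200/_cluster/health?pretty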
####################################################################################################
7. Once startup succeeds, accessing port 9200 returns the node information shown below. Use Firefox or Chrome as the browser.
{
"name" : "linux-node1",
"cluster_name" : "chuck-cluster",
"version" : {
"number" : "2.2.0",
"build_hash" : "8ff36d139e16f8720f2947ef62c8167a888992fe",
"build_timestamp" : "2016-01-27T13:32:39Z",
"build_snapshot" : false,
"lucene_version" : "5.4.1"
},
"tagline" : "You Know, for Search"
}
####################################################################################################
8. Interact through the RESTful API
(1) Check the current indices and shards (a plugin will visualize this later):
[root@linux-node1 tmp]# curl -i -XGET 'http://172.16.10.34:9200/_count?pretty' -d '{
> "query" {
> "match_all": {}
> }
> }'
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 95
{
"count" : 0,
"_shards" : {
"total" : 0,
"successful" : 0,
"failed" : 0
}
}
[root@linux-node1 tmp]#
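The count is 0 because nothing has been indexed yet. A quick illustration of indexing one test document and counting again (the index and type names here are made up for the example):
[root@linux-node1 tmp]# curl -XPUT 'http://172.16.10.34:9200/test-index/test-type/1' -d '{"message": "hello elk"}'
[root@linux-node1 tmp]# curl -XGET 'http://172.16.10.34:9200/_count?pretty'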
(2) Use the head plugin to display indices and shards (if the install does not succeed, just retry it a few times)
[root@linux-node1 tmp]# /usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head
-> Installing mobz/elasticsearch-head...
Trying https://github.com/mobz/elasticsearch-head/archive/master.zip ...
Downloading ..........DONE
Verifying https://github.com/mobz/elasticsearch-head/archive/master.zip checksums if available ...
NOTE: Unable to verify checksum for downloaded plugin (unable to find .sha1 or .md5 file to verify)
Installed head into /usr/share/elasticsearch/plugins/head
[root@linux-node1 tmp]#
Access: http://172.16.10.34:9200/_plugin/head/
####################################################################################################
9. Monitor elasticsearch with the kopf plugin
[root@linux-node1 tmp]# /usr/share/elasticsearch/bin/plugin install lmenezes/elasticsearch-kopf
-> Installing lmenezes/elasticsearch-kopf...
Trying https://github.com/lmenezes/elasticsearch-kopf/archive/master.zip ...
Downloading ..........DONE
Verifying https://github.com/lmenezes/elasticsearch-kopf/archive/master.zip checksums if available ...
NOTE: Unable to verify checksum for downloaded plugin (unable to find .sha1 or .md5 file to verify)
Installed kopf into /usr/share/elasticsearch/plugins/kopf
[root@linux-node1 tmp]#
Open it to take a look: http://172.16.10.34:9200/_plugin/kopf/#!/cluster
####################################################################################################
10. Configure logstash
(1) Start a logstash instance. -e: run the configuration given on the command line; input/stdin and output/stdout are plugins for standard input and standard output.
[root@linux-node1 bin]# /opt/logstash/bin/logstash -e 'input { stdin{} } output { stdout{} }'
Settings: Default filter workers: 1
Logstash startup completed
chuck ==> input
2016-01-14T06:01:07.184Z linux-node1 chuck ==> output
www.chuck-blog.com ==> input
2016-01-14T06:01:18.581Z linux-node1 www.chuck-blog.com ==> output
(2) Use rubydebug for detailed output; codec specifies a codec (encoder/decoder) plugin
[root@linux-node1 bin]# /opt/logstash/bin/logstash -e 'input { stdin{} } output { stdout{ codec => rubydebug} }'
Settings: Default filter workers: 1
Logstash startup completed
chuck ==> input
{
"message" => "chuck",
"@version" => "1",
"@timestamp" => "2016-01-14T06:07:50.117Z",
"host" => "linux-node1"
} ==> output rendered by rubydebug
Each record printed above is called an event. Several related lines of output can also be merged into a single event (for example, the consecutive lines of one log entry in a log file count as one event); see the sketch below.
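As an aside, a hedged sketch of merging several lines into one event with the multiline codec (the pattern here, continuation lines starting with whitespace, is only an example and would be adapted to the actual log format):
[root@linux-node1 bin]# /opt/logstash/bin/logstash -e 'input { stdin{ codec => multiline { pattern => "^\s" what => "previous" } } } output { stdout{ codec => rubydebug } }'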
(3) Use logstash to write data into elasticsearch
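A minimal sketch of this step, assuming elasticsearch is listening on 172.16.10.34:9200 as configured above (the index name pattern is just an example):
[root@linux-node1 bin]# /opt/logstash/bin/logstash -e 'input { stdin{} } output { elasticsearch { hosts => ["172.16.10.34:9200"] index => "logstash-%{+YYYY.MM.dd}" } }'
Lines typed on stdin are then written into elasticsearch instead of the terminal and can be browsed with the head plugin.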
####################################################################################################
####################################################################################################
Edit the kibana configuration file so that it takes effect:
[root@linux-node1 ~]# grep '^[a-Z]' /usr/local/kibana/config/kibana.yml
server.port: 5601                                  kibana port
server.host: "0.0.0.0"                             host address to serve on
elasticsearch.url: "http://172.16.10.34:9200"      URL of the elasticsearch instance to connect to
kibana.index: ".kibana"                            index created in elasticsearch to store kibana's data
Open a screen session and start kibana in it:
[root@linux-node1 tmp]# yum -y install screen
[root@linux-node1 ~]# screen
[root@linux-node1 ~]# /usr/local/kibana/bin/kibana
Detach from the screen session with Ctrl+a, then d.
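After detaching, a quick check that kibana is listening on its port; it can then be opened in the browser at http://<node1-ip>:5601/ (5601 per the configuration above):
[root@linux-node1 ~]# netstat -lntup | grep 5601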