If there are no errors, and ZooKeeper and Kafka are both needed, start ZooKeeper first and then Kafka (you can also write a startup script for this; a sketch follows the commands below):
# nohup /usr/local/kafka/bin/zookeeper-server-start.sh /usr/local/kafka/config/zookeeper.properties &
# nohup /usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties &
Check that both started; the default ports are 2181 (ZooKeeper) and 9092 (Kafka).
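As suggested above, the two commands can be wrapped in a small startup script. The sketch below starts the services in order and then checks the ports; the installation path /usr/local/kafka matches the commands above, while the log locations and the 10-second waits are assumptions to adjust as needed.
#!/bin/bash
# start-kafka.sh - start ZooKeeper first, then Kafka
KAFKA_HOME=/usr/local/kafka
nohup $KAFKA_HOME/bin/zookeeper-server-start.sh $KAFKA_HOME/config/zookeeper.properties > /tmp/zookeeper.log 2>&1 &
sleep 10   # give ZooKeeper time to come up before Kafka connects to it
nohup $KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties > /tmp/kafka.log 2>&1 &
sleep 10
# verify the listening ports: 2181 (ZooKeeper) and 9092 (Kafka)
ss -lntp | grep -E '2181|9092'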
Create a topic:
# bin/kafka-topics.sh --create --zookeeper zk01.yiguanjinrong.yg:2181 --replication-factor 1 --partitions 1 --topic test
Created topic "test".
List the created topics:
# bin/kafka-topics.sh --list --zookeeper zk01.yiguanjinrong.yg:2181
test
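Optionally, the topic's partition and replica assignment can be verified with the --describe action; a quick sketch reusing the same ZooKeeper address:
# bin/kafka-topics.sh --describe --zookeeper zk01.yiguanjinrong.yg:2181 --topic test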
Simulate a producer sending messages:
# bin/kafka-console-producer.sh --broker-list 192.168.128.144:9092 --topic test
Press Enter, type a few messages, and then simulate a consumer receiving them (if the messages arrive, the Kafka deployment is working):
# bin/kafka-console-consumer.sh --bootstrap-server 192.168.128.144:9092 --topic test --from-beginning
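The same round trip can also be scripted instead of typed interactively; a minimal sketch (the message text is arbitrary):
# echo "hello kafka" | bin/kafka-console-producer.sh --broker-list 192.168.128.144:9092 --topic test
# bin/kafka-console-consumer.sh --bootstrap-server 192.168.128.144:9092 --topic test --from-beginning --max-messages 1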
Delete the topic:
# bin/kafka-topics.sh --delete --zookeeper zk01.yiguanjinrong.yg:2181 --topic test
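Depending on the Kafka version, the delete only takes effect if topic deletion is enabled on the broker; otherwise the topic is merely marked for deletion. A quick check of the relevant property (if it is missing or false, set delete.topic.enable=true in server.properties and restart the broker):
# grep delete.topic.enable /usr/local/kafka/config/server.properties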
Install and deploy Logstash
Install Logstash
# yum localinstall jdk-8u144-linux-x64.rpm -y
# yum localinstall logstash-6.0.0-beta2.rpm -y
The Logstash installation directory and configuration directory (no configuration file is shipped by default) are:
# /usr/share/logstash/
# /etc/logstash/conf.d/
Note that the installation does not add the bin directory to the PATH.
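If you want to run logstash without typing the full path, the bin directory can be added to the PATH manually; one possible way (using /etc/profile here is just an example, any shell profile works):
# echo 'export PATH=$PATH:/usr/share/logstash/bin' >> /etc/profile
# source /etc/profile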
Logstash configuration file contents:
# cat /etc/logstash/conf.d/logstash.conf
input {
kafka {
bootstrap_servers => "192.168.128.144:9092,192.168.128.145:9092,192.168.128.145:9092"
topics => ["credit"]
group_id => "test-consumer-group"
codec => "plain"
consumer_threads => 1
decorate_events => true
}
}
output {
elasticsearch {
hosts => ["192.168.162.58:9200","192.168.162.61:9200","192.168.162.62:9200","192.168.162.63:9200","192.168.162.64:9200"]
index => "logs-%{+YYYY.MM.dd}"
workers => 1
}
}
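Before wiring Kafka to Elasticsearch, it can be useful to test the pipeline with a stdout output so the decoded events show up on the console. A minimal debugging variant of the config above (same kafka input, only the output swapped); this is a troubleshooting sketch, not part of the final deployment:
input {
  kafka {
    bootstrap_servers => "192.168.128.144:9092"
    topics => ["credit"]
    group_id => "test-consumer-group"
  }
}
output {
  stdout { codec => rubydebug }
}
Save it to a temporary file and run it in the foreground with /usr/share/logstash/bin/logstash -f <file>.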
Check that the configuration file is valid:
# /usr/share/logstash/bin/logstash -t --path.settings /etc/logstash/ --verbose
Sending Logstash's logs to /var/log/logstash which is now configured via log4j2.properties
Configuration OK
Logstash does not ship with an init script by default, but it provides a tool to generate one.
View the tool's usage help:
# bin/system-install --help
Usage: system-install [OPTIONSFILE] [STARTUPTYPE] [VERSION]
NOTE: These arguments are ordered, and co-dependent
OPTIONSFILE: Full path to a startup.options file
OPTIONSFILE is required if STARTUPTYPE is specified, but otherwise looks first
in /usr/share/logstash/config/startup.options and then /etc/logstash/startup.options
Last match wins
STARTUPTYPE: e.g. sysv, upstart, systemd, etc.
OPTIONSFILE is required to specify a STARTUPTYPE.
VERSION: The specified version of STARTUPTYPE to use. The default is usually
preferred here, so it can safely be omitted.
Both OPTIONSFILE & STARTUPTYPE are required to specify a VERSION.
# /usr/share/logstash/bin/system-install /etc/logstash/startup.options sysv
The generated file is /etc/init.d/logstash. Pay attention to the log directory it uses; it is recommended to keep the logs under /var/log/logstash.
# mkdir -p /var/log/logstash && chown logstash.logstash -R /var/log/logstash
The part of the script that needs to be modified is shown below:
start() {
  # Ensure the log directory is setup correctly.
  if [ ! -d "/var/log/logstash" ]; then
    mkdir "/var/log/logstash"
    chown "$user":"$group" -R "/var/log/logstash"
    chmod 755 "/var/log/logstash"
  fi

  # Setup any environmental stuff beforehand
  ulimit -n ${limit_open_files}

  # Run the program!
  nice -n "$nice" \
  chroot --userspec "$user":"$group" "$chroot" sh -c "
    ulimit -n ${limit_open_files}
    cd \"$chdir\"
    exec \"$program\" $args
  " >> /var/log/logstash/logstash-stdout.log 2>> /var/log/logstash/logstash-stderr.log &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > $pidfile

  emit "$name started"
  return 0
}
Start Logstash and check its log for errors:
# /etc/init.d/logstash start
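A quick way to confirm that it actually came up is to check for the process and tail the logs the modified script writes to; a small sketch:
# ps -ef | grep [l]ogstash
# tail -f /var/log/logstash/logstash-stderr.log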
Install and deploy the Elasticsearch cluster
Install Elasticsearch
# yum localinstall jdk-8u144-linux-x64.rpm -y
# yum localinstall elasticsearch-6.0.0-beta2.rpm -y
Configure Elasticsearch
Installation path:
# /usr/share/elasticsearch/
Configuration file:
# /etc/elasticsearch/elasticsearch.yml
Elasticsearch configuration file contents:
# cat elasticsearch.yml | grep -Ev "^$|^#"
cluster.name: elasticsearch
node.name: es01 # change the node name accordingly on each node
path.data: /data1/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.system_call_filter: false
network.host: 192.168.162.58 # change the address accordingly on each node
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.162.58", "192.168.162.61", "192.168.162.62", "192.168.162.63", "192.168.162.64"]
discovery.zen.minimum_master_nodes: 3
node.master: true
node.data: true
transport.tcp.compress: true
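Two notes on the configuration above. First, with five master-eligible nodes the usual quorum rule gives floor(5/2) + 1 = 3, which is why discovery.zen.minimum_master_nodes is set to 3; recalculate it if the node count changes. Second, path.data points at /data1/elasticsearch, so that directory must exist on every node and be owned by the elasticsearch user, for example:
# mkdir -p /data1/elasticsearch && chown elasticsearch.elasticsearch -R /data1/elasticsearch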
Start Elasticsearch
# mkdir -p /var/log/elasticsearch && chown elasticsearch.elasticsearch -R /var/log/elasticsearch
# /etc/init.d/elasticsearch start
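Once all nodes are started, the cluster state can be checked from any node with the standard health and cat APIs; a status of green (or yellow before replicas are allocated) and five entries in the node list indicate the cluster has formed:
# curl "http://192.168.162.58:9200/_cluster/health?pretty"
# curl "http://192.168.162.58:9200/_cat/nodes?v"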
Install and deploy Kibana
Install Kibana
# yum localinstall kibana-6.0.0-beta2-x86_64.rpm -y
Kibana configuration file contents:
# cat /etc/kibana/kibana.yml | grep -Ev "^$|^#"
server.port: 5601
server.host: "192.168.162.66"
elasticsearch.url: "http://192.168.162.58:9200" # address of any node in the Elasticsearch cluster
kibana.index: ".kibana"
pid.file: /var/run/kibana/kibana.pid
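Before starting, it is worth confirming that the Kibana host can reach the Elasticsearch address configured above; a minimal check (assuming curl is available on the host):
# curl http://192.168.162.58:9200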
Start Kibana
Modify the Kibana init script:
# mkdir -p /var/run/kibana
# chown kibana.kibana -R /var/run/kibana
The part of the init script that needs to be modified:
start() {
  # Ensure the log directory is setup correctly.
  [ ! -d "/var/log/kibana/" ] && mkdir "/var/log/kibana/"
  chown "$user":"$group" "/var/log/kibana/"
  chmod 755 "/var/log/kibana/"

  # Setup any environmental stuff beforehand
  # Run the program!
  chroot --userspec "$user":"$group" "$chroot" sh -c "
    cd \"$chdir\"
    exec \"$program\" $args
  " >> /var/log/kibana/kibana.stdout 2>> /var/log/kibana/kibana.stderr &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > $pidfile

  emit "$name started"
  return 0
}
Start:
# /etc/init.d/kibana start
The Kibana page is now reachable at http://192.168.162.66:5601
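To confirm from the command line that Kibana is up and listening, a quick sketch (the /api/status endpoint is Kibana's built-in status API):
# ss -lntp | grep 5601
# curl http://192.168.162.66:5601/api/status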