浅见池也 posted on 2019-1-20 14:45:55

Batch Monitoring Website Availability with Zabbix: Method Two

  1. Application Scenario
  The previous article, Batch Monitoring Website Availability with Zabbix: Method One, introduced Zabbix's built-in Web monitoring, which only uses the libcurl library to check all the URLs from the Zabbix server or proxy itself, so it cannot actually measure the network quality of accessing each URL from different regions. This article instead uses Zabbix LLD, the pycurl module, and zabbix_sender to collect the network quality that Zabbix agents in different regions experience when accessing each URL.
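  For reference, a Zabbix LLD discovery rule consumes JSON of the following shape; the script below builds one entry per input line using the two macros {#WEBSITE_NAME} and {#WEBSITE_URL} (the two sites here are sample values only):

{
    "data": [
        { "{#WEBSITE_NAME}": "baidu",  "{#WEBSITE_URL}": "www.baidu.com" },
        { "{#WEBSITE_NAME}": "taobao", "{#WEBSITE_URL}": "www.taobao.com" }
    ]
}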
  

  2. Writing the Script
  Python multi-threaded version
#!/usr/bin/python
#This script checks multiple URLs across different websites, read from a given file that lists all the URLs.
#It uses Zabbix low-level discovery (LLD) to discover the websites and URLs, checks each URL with pycurl,
#and finally sends the results to the Zabbix proxy or server with zabbix_sender.
# Two Zabbix LLD macros are used: {#WEBSITE_NAME} and {#WEBSITE_URL}
# tested on zabbix 3.0
#
#written by john wang
#
#curl_easy_perform()
#    |
#    |--NAMELOOKUP
#    |--|--CONNECT
#    |--|--|--APPCONNECT
#    |--|--|--|--PRETRANSFER
#    |--|--|--|--|--STARTTRANSFER
#    |--|--|--|--|--|--TOTAL
#    |--|--|--|--|--|--REDIRECT


import json
import logging
import os,sys,time
import threading
import Queue
import subprocess
try:
   from cStringIO import StringIO
except ImportError:
   from StringIO import StringIO
import pycurl
# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see
# the libcurl tutorial for more info.
try:
   import signal
   from signal import SIGPIPE, SIG_IGN
   signal.signal(signal.SIGPIPE,signal.SIG_IGN)
except ImportError:
   pass

# needs a txt file that contains the urls
#e.g.
#baidu www.baidu.com
#taobao www.taobao.com
try:
   if sys.argv[1]=="-":
      urls=sys.stdin.readlines()
   else:
      urls=open(sys.argv[1],'rb').readlines()
   #print urls
except (IndexError, IOError):
   print "Usage: %s check_urls.txt (list of websites)" % sys.argv[0]
   raise SystemExit

#logging.basicConfig(filename='/tmp/check_urls.log', level=logging.WARNING, format='%(asctime)s %(levelname)s: %(message)s')
zabbix_conf='/opt/app/zabbix/conf/zabbix_agentd.conf'
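# zabbix_sender reads ServerActive and Hostname from this agent config; the
# intended invocation is presumably along these lines (see the end of the script):
#   zabbix_sender -c /opt/app/zabbix/conf/zabbix_agentd.conf -i /tmp/check_url_items.txt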

class Curl:
   def __init__(self,url):
       self.url=url
       self.body=StringIO()
       self.http_code=0
       self._curl=pycurl.Curl()
       self._curl.setopt(pycurl.URL,self.url)
       self._curl.setopt(pycurl.FOLLOWLOCATION,True)
       self._curl.setopt(pycurl.DNS_CACHE_TIMEOUT,0)
       self._curl.setopt(pycurl.DNS_USE_GLOBAL_CACHE,False)
       self._curl.setopt(pycurl.CONNECTTIMEOUT,30)
       self._curl.setopt(pycurl.TIMEOUT,60)
       self._curl.setopt(pycurl.FRESH_CONNECT,True)
       self._curl.setopt(pycurl.FORBID_REUSE,True)
       self._curl.setopt(pycurl.WRITEFUNCTION,self.body.write)
       self._curl.setopt(pycurl.NOSIGNAL,1)
       self._curl.debug=0
   def perform(self):
       try:
         self._curl.perform()
       except Exception as e:
          #logging.warning(self.url+"\t"+str(e))
         return
   def close(self):
      try:
         self.http_code=self._curl.getinfo(pycurl.HTTP_CODE)
         self.total_time=self._curl.getinfo(pycurl.TOTAL_TIME)
         self.namelookup_time=self._curl.getinfo(pycurl.NAMELOOKUP_TIME)
         self.connect_time=self._curl.getinfo(pycurl.CONNECT_TIME)-self._curl.getinfo(pycurl.NAMELOOKUP_TIME)
         self.appconnect_time=max(0,(self._curl.getinfo(pycurl.APPCONNECT_TIME)-self._curl.getinfo(pycurl.CONNECT_TIME)))
         self.pretransfer_time=self._curl.getinfo(pycurl.PRETRANSFER_TIME)-max(self._curl.getinfo(pycurl.APPCONNECT_TIME),self._curl.getinfo(pycurl.CONNECT_TIME))
         self.starttransfer_time=self._curl.getinfo(pycurl.STARTTRANSFER_TIME)-self._curl.getinfo(pycurl.PRETRANSFER_TIME)
         self.redirect_time=max(0,self._curl.getinfo(pycurl.REDIRECT_TIME)-self._curl.getinfo(pycurl.TOTAL_TIME))
         self.speed_download=self._curl.getinfo(pycurl.SPEED_DOWNLOAD)
      except Exception as e:
         #logging.warning(self.url+"\t"+str(e))
         self.http_code=0
         self.total_time=0
         self.namelookup_time=0
         self.connect_time=0
         self.appconnect_time=0
         self.pretransfer_time=0
         self.starttransfer_time=0
         self.redirect_time=0
         self.speed_download=0
      self._curl.close()
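
# Quick usage sketch of the Curl class above (hypothetical URL):
#   c=Curl('http://www.example.com')
#   c.perform()
#   c.close()
#   print c.http_code, c.total_time, c.speed_download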
queue=Queue.Queue()
websites=[]
tmpfile='/tmp/check_url_items.txt'
for line in urls:
    line=line.strip()
    if not line or line.startswith("#"):
       continue
    name,url=line.split()
    element={'{#WEBSITE_NAME}':name,
             '{#WEBSITE_URL}':url
            }
    websites.append(element)
    #logging.debug('Discovered website ' + name + '\t' + url)
    queue.put((name,url))
assert queue.queue, "no urls are given"
num_urls=len(queue.queue)
#num_conn=min(num_conn,num_urls)
num_conn=num_urls
#assert 1
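
  The post is truncated at this point. The remaining steps under this design — worker threads draining the queue, printing the LLD JSON for the discovery rule, and pushing results with zabbix_sender — might look like the following minimal sketch; the item keys web.site.code[...] and web.site.time.total[...] are assumed prototypes, not taken from the original:

def worker():
    # each worker pulls (name,url) pairs until the queue is empty
    while True:
        try:
            name,url=queue.get_nowait()
        except Queue.Empty:
            return
        c=Curl(url)
        c.perform()
        c.close()
        results.append((name,c))

results=[]
threads=[]
for i in range(num_conn):
    t=threading.Thread(target=worker)
    t.start()
    threads.append(t)
for t in threads:
    t.join()

# print the discovery JSON so the Zabbix LLD rule can create the item prototypes
print json.dumps({'data':websites},indent=4)

# one "<host> <key> <value>" line per metric; "-" as host makes
# zabbix_sender use the Hostname from the config passed with -c
f=open(tmpfile,'w')
for name,c in results:
    f.write('- web.site.code[%s] %s\n' % (name,c.http_code))
    f.write('- web.site.time.total[%s] %s\n' % (name,c.total_time))
f.close()
subprocess.call(['zabbix_sender','-c',zabbix_conf,'-i',tmpfile])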