mysql性能参数详解
mysql性能参数查看脚本

#!/usr/bin/env python
[*]# -*- coding: utf-8 -*-
[*]# @Author: wangwei
[*]# @E-mail: wangwei03@gyyx.cn
[*]# @Create Date:2012-06-19
[*]import MySQLdb
[*]import _mysql
[*]import re
[*]import time
[*]import sys
[*]import getpass
[*]from decimal import Decimal
[*]
[*]port=3306
[*]dbname="test"
[*]times=2
[*]slow=60
[*]version=Decimal("5.5")
[*]
[*]class tools(object):
[*] @staticmethod
[*] def println(header="",all="",current="",mark=""):
[*]print header.ljust(42)+": "+all.ljust(30)+" "+current.ljust(30)+" "+mark
[*] @staticmethod
[*] def dRatio(param1,param2):
[*]p1=param1
[*]p2=param2
[*]re=""
[*]if(p1==0):
[*] if(p2==0):
[*] re="0:0"
[*] else:
[*] re="0:1"
[*]else:
[*] if(p2==0):
[*] re="1:0"
[*] else:
[*] if(p1==p2):
[*] re="1:1"
[*] elif(p1>p2):
[*] re= tools.dFormat(Decimal(p1)/Decimal(p2))+":1"
[*] else:
[*] re="1:"+tools.dFormat(Decimal(p2)/Decimal(p1))
[*]return re+" "+tools.remarks()
[*] @staticmethod
[*] def remarks(params):
[*]#print params
[*]arr=[]
[*]for param in params:
[*] arr.append(tools.dFormat(param))
[*]return "("+"/".join(arr)+")"
[*] @staticmethod
[*] def dFormat(val):
[*]k=1024
[*]m=k*k
[*]g=k*m
[*]t=k*g
[*]p=k*t
[*]dp=0
[*]dm=""
[*]if(val!=0):
[*] if(val>p):
[*] dp=p
[*] dm="P"
[*] elif(val>t):
[*] dp=t
[*] dm="T"
[*] elif(val>g):
[*] dp=g
[*] dm="G"
[*] elif(val>m):
[*] dp=m
[*] dm="M"
[*] elif(val>k):
[*] dp=k
[*] dm="k"
[*] else:
[*] dp=1
[*] return "%2.2f" % (Decimal(val)/Decimal(dp)) +dm
[*]else:
[*] return "%2.2f" % 0
[*]
[*]class mysqlvariables(object):
[*] def __init__(self,variables):
[*]self.variables=variables
[*]
[*] def param(self,param1):
[*]if(self.variables.has_key(param1)):
[*] return long(self.variables)
[*]else:
[*] if(param1=='table_open_cache'):
[*] return long(self.variables['table_cache'])
[*] else:
[*] return 0L
[*] def dFormat(self,param1):
[*]return tools.dFormat(self.param(param1))
[*] def strFormat(self,param1):
[*]if(self.variables.has_key(param1)):
[*] return self.variables
[*]else:
[*] if(param1=='character_set_server'):
[*] return 'null'
[*] elif(param1=='table_open_cache'):
[*] return self.variables['table_cache']
[*] else:
[*] return 'null'
[*]
[*]class mysqlstatuscomputer(object):
[*] def __init__(self,statuslist):
[*]self.statuslist=statuslist
[*]self.length=len(statuslist)
[*]self.second=self.seconds()
[*]
[*] def getval(self,param):
[*]if(type(param)==type("")):
[*] return self.param(param)
[*]elif(type(param)==type([])):
[*] return self.arange(param)
[*]else:
[*] return param
[*]
[*] def param(self,param1):
[*]if(self.length>1):
[*] if(self.statuslist.has_key(param1)):
[*] return long(self.statuslist)-long(self.statuslist)
[*] else:
[*] if(param1=='Innodb_buffer_pool_read_ahead' and version>5):
[*] return (long(self.statuslist['Innodb_buffer_pool_read_ahead_rnd'])+long(self.statuslist['Innodb_buffer_pool_read_ahead_seq']))-(long(self.statuslist['Innodb_buffer_pool_read_ahead_rnd'])+long(self.statuslist['Innodb_buffer_pool_read_ahead_seq']))
[*] elif(param1=='Uptime_since_flush_status'):
[*] return long(self.statuslist['Uptime'])-long(self.statuslist['Uptime'])
[*] else:
[*] return 0L
[*]else:
[*] if(self.statuslist.has_key(param1)):
[*] return long(self.statuslist)
[*] else:
[*] if(param1=='Innodb_buffer_pool_read_ahead' and version>5):
[*] return long(self.statuslist['Innodb_buffer_pool_read_ahead_rnd'])+long(self.statuslist['Innodb_buffer_pool_read_ahead_seq'])
[*] elif(param1=='Uptime_since_flush_status'):
[*] return long(self.statuslist['Uptime'])
[*] else:
[*] return 0L
[*]
[*] def seconds(self):
[*]return self.param('Uptime_since_flush_status')
[*] '''
[*] param1+param2+....params
[*] '''
[*] def arange(self,params):
[*]re=0
[*]for param in params:
[*] re+=self.getval(param)
[*]return re
[*] '''
[*] param1+param2
[*] '''
[*] def add(self,param1,param2):
[*]return self.getval(param1)+self.getval(param2)
[*] '''
[*] param1-param2
[*] '''
[*] def sub(self,param1,param2):
[*]return self.getval(param1)-self.getval(param2)
[*] '''
[*] param1*param2
[*] '''
[*] def ext(self,param1,param2):
[*]return self.getval(param1)*self.getval(param2)
[*] '''
[*] param1/param2
[*] '''
[*] def per(self,param1,param2):
[*]a=self.getval(param1)
[*]b=self.getval(param2)
[*]if(b!=0):
[*] return Decimal(a)/Decimal(b)
[*]else:
[*] return Decimal(0)
[*]
[*] '''
[*] param/seconds
[*] '''
[*] def ps(self,param):
[*]val=self.getval(param)
[*]
[*]if(self.second>0):
[*] return Decimal(val)/Decimal(self.second)
[*]else:
[*] return Decimal(0)
[*] '''
[*] format param to P T G M K
[*] '''
[*] def format(self,param):
[*]val=self.getval(param)
[*]return tools.dFormat(val)
[*]
[*] def strFormat(self,param):
[*]return self.statuslist
[*]
[*] '''
[*] param1:param2 (param1/param2)
[*] '''
[*] def ratioF(self,param1,param2):
[*]p1=self.getval(param1)
[*]p2=self.getval(param2)
[*]return tools.dRatio(p1, p2)
[*] '''
[*] param1/param2 % (param1/param2)
[*] '''
[*] def perF(self,param1,param2):
[*]return self.format(self.per(param1, param2)*100) + "% "+self.remark(param1, param2)
[*] '''
[*] 1-(param1/param2) % (param1/param2)
[*] '''
[*] def dperF(self,param1,param2):
[*]return self.format((Decimal(1)-self.per(param1, param2))*100) + "% "+self.remark(param1, param2)
[*] '''
[*] param1/(param1+param2) % (param1/param2)
[*] '''
[*] def aperF(self,param1,param2):
[*]return self.format(self.per(param1,self.add(param1, param2))*100) + "% "+self.remark(param1, param2)
[*] '''
[*] param/seconds % (param/seconds)
[*] '''
[*] def psF(self,param):
[*]return self.format(self.ps(param))+"/s "+self.remark(param, self.second)
[*]
[*] '''
[*] (param1/param2)
[*] '''
[*] def remark(self,param1,param2):
[*]params=[]
[*]params.append(param1)
[*]params.append(param2)
[*]return self.remarks(params)
[*] '''
[*] (param1/param2/.../params)
[*] '''
[*] def remarks(self,params):
[*]#print params
[*]arr=[]
[*]for param in params:
[*] arr.append(self.format(self.getval(param)))
[*]return "("+"/".join(arr)+")"
[*]
[*]class mysqlstatusmonitor(object):
[*] def __init__(self,conn):
[*]self.inx=times
[*]self.conn=conn
[*]self.statusList=[]
[*]self.statustmpList=[]
[*]self.totalstatus={}
[*]self.totaltmpstatus=()
[*]self.getstatus()
[*]
[*] def getstatus(self):
[*]statustmp=()
[*]mysqlstatus={}
[*]sql="show /*!41000 GLOBAL */ status"
[*]cursor=self.conn.cursor()
[*]for i in range(self.inx):
[*] cursor.execute(sql)
[*] statustmp=cursor.fetchall()
[*] mysqlstatus={}
[*] for row in statustmp:
[*] mysqlstatus.setdefault(row,row)
[*] self.statustmpList.append(statustmp)
[*] self.statusList.append(mysqlstatus)
[*] if(i0 threads list:"
[*]print "Id\tUser\t\tHost\t\t\tdb\t\tCommand\t\tTime\t\tState\tInfo\t"
[*]threadscount=0
[*]for row in self.processlist:
[*] if(str(row)!="Sleep" and long(row)>1):
[*] print str(row)+"\t"+str(row)+"\t\t"+str(row)+"\t"+str(row)+"\t\t"+str(row)+"\t\t"+str(row)+"\t\t"+str(row)+"\t"+str(row)
[*] threadscount+=1
[*]
[*]print "status time>0 threads count="+str(threadscount)
[*]
[*]
[*] def printmysqlinfo(self):
[*]print "=============MySQL info============="
[*]print "Connection id : "+str(self.conn.thread_id())
[*]print "Current database : "+dbname
[*]print "Current user : "+str(self.currentuser)
[*]print "SSL : "+self.mysqlvariables.strFormat('have_openssl')
[*]#print "Current pager : "
[*]#print "Using outfile : "
[*]#print "Using delimiter : "
[*]self.version=Decimal(self.mysqlvariables.strFormat('version'))
[*]print "MySQL VERSION : "+self.mysqlvariables.strFormat('version')+" "+self.mysqlvariables.strFormat('version_comment')
[*]print "MySQL client info : "+_mysql.get_client_info()
[*]print "Protocol version : "+str(self.conn.get_proto_info())
[*]print "Connection : "+self.conn.get_host_info()
[*]print "Server characterset: "+self.mysqlvariables.strFormat('character_set_server')
[*]print "Db characterset: "+self.mysqlvariables.strFormat('character_set_database')
[*]print "Client characterset: "+self.mysqlvariables.strFormat('character_set_client')
[*]print "Conn.characterset: "+self.mysqlvariables.strFormat('character_set_connection')
[*]print "collation_connection : "+self.mysqlvariables.strFormat('collation_connection')
[*]print "collation_database : "+self.mysqlvariables.strFormat('collation_database')
[*]print "collation_server : "+self.mysqlvariables.strFormat('collation_server')
[*]print "Uptime : "+self.mysqlstatus['Uptime']+"s"
[*]
[*] def printQcachestatus(self):
[*]if(self.mysqlvariables.strFormat("have_query_cache")=="YES" and self.mysqlvariables.strFormat("query_cache_type")!="OFF" and self.mysqlvariables.param("query_cache_size")>0):
[*] print "------------------------------------"
[*] print "Qcache Status"
[*] print "------------------------------------"
[*] tools.println("Qcache queries hits ratio(hits/reads)",self.totalcomputer.perF("Qcache_hits", ["Com_select", "Qcache_hits"]),self.computer.perF("Qcache_hits", ["Com_select", "Qcache_hits"]),"Higher than 30.00")
[*] tools.println("Qcache hits inserts ratio(hits/inserts)",self.totalcomputer.perF("Qcache_hits", "Qcache_inserts"),self.computer.perF("Qcache_hits", "Qcache_inserts"),"Higher than 300.00")
[*] tools.println("Qcache memory used ratio(free/total)",self.totalcomputer.dperF("Qcache_free_memory",self.mysqlvariables.param("query_cache_size")))
[*] tools.println("Qcache prune ratio(prunes/inserts)",self.totalcomputer.perF("Qcache_lowmem_prunes", "Qcache_inserts"),self.computer.perF("Qcache_lowmem_prunes", "Qcache_inserts"))
[*] tools.println("Qcache block Fragmnt ratio(free/total)",self.totalcomputer.perF("Qcache_free_blocks", "Qcache_total_blocks"))
[*]
[*] def printUptimesinceflushstatus(self):
[*]print "-----------------------------------------------------------------------------------------------------------------------------"
[*]print "Reads/Writes status total current proposal "
[*]print "-----------------------------------------------------------------------------------------------------------------------------"
[*]tools.println("Reads:Writes ratio(Reads/Writes)",self.totalcomputer.ratioF(["Com_select", "Qcache_hits"], ["Com_insert","Com_insert_select","Com_update","Com_update_multi","Com_delete","Com_delete_multi","Com_replace","Com_replace_select"]),self.computer.ratioF(["Com_select", "Qcache_hits"], ["Com_insert","Com_insert_select","Com_update","Com_update_multi","Com_delete","Com_delete_multi","Com_replace","Com_replace_select"]))
[*]tools.println("QPS(Questions/Uptime)",self.totalcomputer.psF("Questions"),self.computer.psF("Questions"))
[*]tools.println("TPS(Questions/Uptime)",self.totalcomputer.psF(["Com_commit","Com_rollback"]),self.computer.psF(["Com_commit","Com_rollback"]))
[*]tools.println("Table locks waited ratio(waited/immediate)",self.totalcomputer.ratioF("Table_locks_waited","Table_locks_immediate"),self.computer.ratioF("Table_locks_waited","Table_locks_immediate"),"0:1")
[*]tools.println("select per second(select/Uptime)",self.totalcomputer.psF(["Com_select","Qcache_hits"]),self.computer.psF(["Com_select","Qcache_hits"]))
[*]tools.println("insert per second(insert/Uptime)",self.totalcomputer.psF(["Com_insert","Com_insert_select"]),self.computer.psF(["Com_insert","Com_insert_select"]))
[*]tools.println("update per second(update/Uptime)",self.totalcomputer.psF(["Com_update","Com_update_multi"]),self.computer.psF(["Com_update","Com_update_multi"]))
[*]tools.println("delete per second(delete/Uptime)",self.totalcomputer.psF(["Com_delete","Com_delete_multi"]),self.computer.psF(["Com_delete","Com_delete_multi"]))
[*]tools.println("replace per second(replace/Uptime)",self.totalcomputer.psF(["Com_replace","Com_replace_select"]),self.computer.psF(["Com_replace","Com_replace_select"]))
[*]tools.println("Bytes sent per second(sent/Uptime)",self.totalcomputer.psF("Bytes_sent"),self.computer.psF("Bytes_sent"))
[*]tools.println("Bytes received per second(re/Uptime)",self.totalcomputer.psF("Bytes_received"),self.computer.psF("Bytes_received"))
[*]print "------------------------------------"
[*]print "Slow and Sort queries status"
[*]print "------------------------------------"
[*]tools.println("Slow queries Ratio(Slow/Questions)",self.totalcomputer.perF("Slow_queries", "Questions"),self.computer.perF("Slow_queries", "Questions"),"Lower than 0")
[*]tools.println("Slow queries PS(Slow/Uptime)",self.totalcomputer.psF("Slow_queries"),self.computer.psF("Slow_queries"),"Lower than 0")
[*]tools.println("Full join PS(full/Uptime)",self.totalcomputer.psF("Select_full_join"),self.computer.psF("Select_full_join"),"Lower than 0")
[*]tools.println("Sort merge passes PS(merge/Uptime)",self.totalcomputer.psF("Sort_merge_passes"),self.computer.psF("Sort_merge_passes"),"Lower than 0")
[*]tools.println("Sort range PS(range/Uptime)",self.totalcomputer.psF("Sort_range"),self.computer.psF("Sort_range"),"Lower than 0")
[*]tools.println("Sort rows PS(rows/Uptime)",self.totalcomputer.psF("Sort_rows"),self.computer.psF("Sort_rows"),"Lower than 0")
[*]tools.println("Sort scan PS(scan/Uptime)",self.totalcomputer.psF("Sort_scan"),self.computer.psF("Sort_scan"),"Lower than 0")
[*]print "------------------------------------"
[*]print "connections status"
[*]print "------------------------------------"
[*]tools.println("Thread cache hits(created/Total)",self.totalcomputer.dperF("Threads_created", "Connections"),self.computer.dperF("Threads_created", "Connections"),"Higher than 0")
[*]tools.println("Connections used ratio(Max used/Max)",self.totalcomputer.perF("Max_used_connections",self.mysqlvariables.param("max_connections")),self.computer.perF("Max_used_connections",self.mysqlvariables.param("max_connections")),"Lower than 90")
[*]tools.println("Aborted connects ratio(Aborted/Max)",self.totalcomputer.perF(["Aborted_clients","Aborted_connects"],self.mysqlvariables.param("max_connections")),self.computer.perF(["Aborted_clients","Aborted_connects"],self.mysqlvariables.param("max_connections")),"Lower than 0")
[*]tools.println("Threads running",self.totalcomputer.format("Threads_running"))
[*]print "------------------------------------"
[*]print "temp table and Open tables/files status"
[*]print "------------------------------------"
[*]tools.println("Temp tables to Disk ratio(disk/tmp)",self.totalcomputer.perF("Created_tmp_disk_tables", "Created_tmp_tables"),self.computer.perF("Created_tmp_disk_tables", "Created_tmp_tables"),"Lower than 0")
[*]tools.println("Open tables/table open cache/Opened tables",self.totalcomputer.remarks(["Open_tables",self.mysqlvariables.param("table_open_cache"),"Opened_tables"]),self.computer.remarks(["Open_tables",self.mysqlvariables.param("table_open_cache"),"Opened_tables"]),"1:1:1")
[*]tools.println("Opened files PS(Opened/Uptime)",self.totalcomputer.psF("Opened_files"),self.computer.psF("Opened_files"),"Lower than 0")
[*]
[*] def printInnoDBStatus(self):
[*]print "------------------------------------"
[*]print "InnoDB Status"
[*]print "------------------------------------"
[*]tools.println( "Innodb buffer read hits(Disk/total)",self.totalcomputer.dperF(["Innodb_buffer_pool_reads","Innodb_buffer_pool_read_ahead"], "Innodb_buffer_pool_read_requests"),self.computer.dperF(["Innodb_buffer_pool_reads","Innodb_buffer_pool_read_ahead"], "Innodb_buffer_pool_read_requests"),"Higher than 99.99")
[*]tools.println( "Innodb dblwr pages written:dblwr writes",self.totalcomputer.ratioF("Innodb_dblwr_pages_written", "Innodb_dblwr_writes"),"","Lower than 64:1")
[*]tools.println( "Innodb buffer pages used ratio(free/total)",self.totalcomputer.dperF("Innodb_buffer_pool_pages_free", "Innodb_buffer_pool_pages_total"),"","Lower than 99.99")
[*]tools.println( "Innodb buffer pages dirty ratio(dirty)",self.totalcomputer.perF("Innodb_buffer_pool_pages_dirty", "Innodb_buffer_pool_pages_total"))
[*]tools.println( "Innodb buffer pages flushed PS(flushed)",self.totalcomputer.psF("Innodb_buffer_pool_pages_flushed"),self.computer.psF("Innodb_buffer_pool_pages_flushed"))
[*]tools.println( "Innodb buffer pool pages misc",self.totalcomputer.format("Innodb_buffer_pool_pages_misc"))
[*]
[*]tools.println( "Innodb row lock waits PS(waits/Uptime)",self.totalcomputer.psF("Innodb_row_lock_current_waits"),self.computer.psF("Innodb_row_lock_current_waits"))
[*]tools.println( "Innodb row lock current waits",self.totalcomputer.format("Innodb_row_lock_current_waits"),self.computer.format("Innodb_row_lock_current_waits"))
[*]tools.println( "Innodb row lock time avg",self.totalcomputer.format("Innodb_row_lock_time_avg"),self.computer.format("Innodb_row_lock_time_avg"))
[*]tools.println( "Innodb row lock time max",self.totalcomputer.format("Innodb_row_lock_time_max"),self.computer.format("Innodb_row_lock_time_max"))
[*]tools.println( "Innodb row lock time total",self.totalcomputer.format("Innodb_row_lock_time"),self.computer.format("Innodb_row_lock_time"))
[*]
[*]tools.println( "Innodb rows read PS(read/Uptime)",self.totalcomputer.psF("Innodb_rows_read"),self.computer.psF("Innodb_rows_read"))
[*]tools.println( "Innodb rows inserted PS(inserted/Uptime)",self.totalcomputer.psF("Innodb_rows_inserted"),self.computer.psF("Innodb_rows_inserted"))
[*]tools.println( "Innodb rows updated PS(updated/Uptime)",self.totalcomputer.psF("Innodb_rows_updated"),self.computer.psF("Innodb_rows_updated"))
[*]
[*]tools.println( "Innodb data reads PS(reads/Uptime)",self.totalcomputer.psF("Innodb_data_reads"),self.computer.psF("Innodb_data_reads"))
[*]tools.println( "Innodb data writes PS(writes/Uptime)",self.totalcomputer.psF("Innodb_data_writes"),self.computer.psF("Innodb_data_writes"))
[*]tools.println( "Innodb data fsyncs PS(fsyncs/Uptime)",self.totalcomputer.psF("Innodb_data_fsyncs"),self.computer.psF("Innodb_data_fsyncs"))
[*]
[*]tools.println( "Innodb data pending reads PS(reads/Uptime)",self.totalcomputer.psF("Innodb_data_pending_reads"),self.computer.psF("Innodb_data_pending_reads"))
[*]tools.println( "Innodb data pending writes PS(write/Uptime)",self.totalcomputer.psF("Innodb_data_pending_writes"),self.computer.psF("Innodb_data_pending_writes"))
[*]tools.println( "Innodb data pending fsyncs PS(fsync/Uptime)",self.totalcomputer.psF("Innodb_data_pending_fsyncs"),self.computer.psF("Innodb_data_pending_fsyncs"))
[*]
[*] def printkeystatus(self):
[*]print "------------------------------------"
[*]print "key buffer Status"
[*]print "------------------------------------"
[*]tools.println( "key buffer used ratio(used/size)",self.totalcomputer.perF(self.totalcomputer.ext("Key_blocks_used",self.mysqlvariables.param("key_cache_block_size")), self.mysqlvariables.param("key_buffer_size")),"","Lower than 99.99")
[*]tools.println( "key buffer read hit ratio(reads/request)",self.totalcomputer.dperF("Key_reads", "Key_read_requests"),self.computer.dperF("Key_reads", "Key_read_requests"),"Higher than 99.99")
[*]tools.println( "key buffer write hit ratio(writes/request)",self.totalcomputer.dperF("Key_writes", "Key_write_requests"),self.computer.dperF("Key_writes", "Key_write_requests"),"Higher than 99.99")
[*]
[*] def printslavestatus(self):
[*]print "------------------------------------"
[*]print "Slave Status"
[*]print "------------------------------------"
[*]tools.println( "Slave running status",self.totalcomputer.strFormat("Slave_running"),self.computer.strFormat("Slave_running"))
[*]tools.println( "Slave open temp tables",self.totalcomputer.format("Slave_open_temp_tables"),self.computer.format("Slave_open_temp_tables"))
[*]tools.println( "Slave transactions PS(transactions/Uptime)",self.totalcomputer.psF("Slave_retried_transactions"),self.computer.psF("Slave_retried_transactions"))
[*]tools.println( "Slave received PS(heartbeats/Uptime)",self.totalcomputer.psF("Slave_received_heartbeats"),self.computer.psF("Slave_received_heartbeats"))
[*]
[*] def printflushstatus(self):
[*]print "=========MySQL status pulse========="
[*]self.printUptimesinceflushstatus()
[*]if(self.version> Decimal("5")):
[*] self.printInnoDBStatus()
[*]self.printkeystatus()
[*]self.printQcachestatus()
[*]self.printslavestatus()
[*]#self.printtablestatus()
[*]
[*]class mysqlmonitor(object):
[*] def __init__(self,dbs):
[*]self.conn=MySQLdb.connect(host=dbs['host'],port=dbs['port'],user=dbs['user'],passwd=dbs['passwd'],db=dbs['db'])
[*]self.monitor=mysqlstatusmonitor(self.conn)
[*]self.pulse=mysqlpulse(self.conn,self.monitor)
[*]
[*] def du(self):
[*]self.pulse.printmysqlinfo()
[*]self.pulse.printprocesslist()
[*]self.pulse.printflushstatus()
[*]#printstatus(statustmp, statustmpList)
[*]#printinnodbstatus(innodbstatus)
[*]
[*] def __del__( self ):
[*]self.conn.close()
[*]
[*]if __name__ == '__main__':
[*] host = raw_input("Host:")
[*] if not host.strip():host = "localhost"
[*] user = raw_input("User:")
[*] if not user.strip():user = "root"
[*] pwd = getpass.getpass("Password %s@%s:" % (user,host))
[*] dbname = raw_input("database:")
[*] if not dbname.strip():dbname = "mysql"
[*] dbs={'host':host,'port':port,'user':user,'passwd':pwd,'db':dbname}
[*] monitor=mysqlmonitor(dbs)
[*] monitor.du()
[*]
转载自http://www.linuxidc.com/Linux/2012-08/67832.htm
MySQL的优化我分为两个部分,一是服务器物理硬件的优化,二是MySQL自身(my.cnf)的优化。
一、服务器硬件对MySQL性能的影响
①磁盘寻道能力(磁盘I/O),以目前高转速SCSI硬盘(7200转/秒)为例,这种硬盘理论上每秒寻道7200次,这是物理特性决定的,没有办法改变。MySQL每秒钟都在进行大量、复杂的查询操作,对磁盘的读写量可想而知。所以,通常认为磁盘I/O是制约MySQL性能的最大因素之一,对于日均访问量在100万PV以上的Discuz!论坛,由于磁盘I/O的制约,MySQL的性能会非常低下!解决这一制约因素可以考虑以下几种解决方案: 使用RAID-0+1磁盘阵列,注意不要尝试使用RAID-5,MySQL在RAID-5磁盘阵列上的效率不会像你期待的那样快。
②CPU 对于MySQL应用,推荐使用S.M.P.架构的多路对称CPU,例如:可以使用两颗Intel Xeon 3.6GHz的CPU,现在我较推荐用4U的服务器来专门做数据库服务器,不仅仅是针对于mysql。
③物理内存对于一台使用MySQL的Database Server来说,服务器内存建议不要小于2GB,推荐使用4GB以上的物理内存,不过内存对于现在的服务器而言可以说是一个可以忽略的问题,工作中遇到了高端服务器基本上内存都超过了16G。
二、MySQL自身因素当解决了上述服务器硬件制约因素后,让我们看看MySQL自身的优化是如何操作的。对MySQL自身的优化主要是对其配置文件my.cnf中的各项参数进行优化调整。下面我们介绍一些对性能影响较大的参数。 由于my.cnf文件的优化设置是与服务器硬件配置息息相关的,因而我们指定一个假想的服务器硬件环境:CPU: 2颗Intel Xeon 2.4GHz 内存: 4GB DDR 硬盘: SCSI 73GB(很常见的2U服务器)。
下面,我们根据以上硬件配置结合一份已经优化好的my.cnf进行说明:
#vim /etc/my.cnf以下只列出my.cnf文件中段落中的内容,其他段落内容对MySQL运行性能影响甚微,因而姑且忽略。
port = 3306
server-id = 1
socket = /tmp/mysql.sock
skip-locking
#避免MySQL的外部锁定,减少出错几率增强稳定性。
skip-name-resolve
#禁止MySQL对外部连接进行DNS解析,使用这一选项可以消除MySQL进行DNS解析的时间。但需要注意,如果开启该选项,则所有远程主机连接授权都要使用IP地址方式,否则MySQL将无法正常处理连接请求!
back_log = 384
#back_log参数的值指出在MySQL暂时停止响应新请求之前的短时间内多少个请求可以被存在堆栈中。 如果系统在一个短时间内有很多连接,则需要增大该参数的值,该参数值指定到来的TCP/IP连接的侦听队列的大小。不同的操作系统在这个队列大小上有它自己的限制。 试图设定back_log高于你的操作系统的限制将是无效的。默认值为50。对于Linux系统推荐设置为小于512的整数。
key_buffer_size = 256M
#key_buffer_size指定用于索引的缓冲区大小,增加它可得到更好的索引处理性能。对于内存在4GB左右的服务器该参数可设置为256M或384M。注意:该参数值设置的过大反而会是服务器整体效率降低!
max_allowed_packet = 4M
thread_stack = 256K
table_cache = 128K
sort_buffer_size = 6M
#查询排序时所能使用的缓冲区大小。注意:该参数对应的分配内存是每连接独占,如果有100个连接,那么实际分配的总共排序缓冲区大小为100 × 6 = 600MB。所以,对于内存在4GB左右的服务器推荐设置为6-8M。
read_buffer_size = 4M
#读查询操作所能使用的缓冲区大小。和sort_buffer_size一样,该参数对应的分配内存也是每连接独享。
join_buffer_size = 8M
#联合查询操作所能使用的缓冲区大小,和sort_buffer_size一样,该参数对应的分配内存也是每连接独享。
myisam_sort_buffer_size = 64M
table_cache = 512
thread_cache_size = 64
query_cache_size = 64M
#指定MySQL查询缓冲区的大小。可以通过在MySQL控制台观察,如果Qcache_lowmem_prunes的值非常大,则表明经常出现缓冲不够的情况;如果Qcache_hits的值非常大,则表明查询缓冲使用非常频繁,如果该值较小反而会影响效率,那么可以考虑不用查询缓冲;Qcache_free_blocks,如果该值非常大,则表明缓冲区中碎片很多。
tmp_table_size = 256M
max_connections = 768
#指定MySQL允许的最大连接进程数。如果在访问论坛时经常出现Too Many Connections的错误提 示,则需要增大该参数值。
max_connect_errors = 10000000
wait_timeout = 10
#指定一个请求的最大连接时间,对于4GB左右内存的服务器可以设置为5-10。
thread_concurrency = 8
#该参数取值为服务器逻辑CPU数量*2,在本例中,服务器有2颗物理CPU,而每颗物理CPU又支持H.T超线程,所以实际取值为4*2=8
skip-networking
#开启该选项可以彻底关闭MySQL的TCP/IP连接方式,如果WEB服务器是以远程连接的方式访问MySQL数据库服务器则不要开启该选项!否则将无法正常连接!
table_cache=1024
#物理内存越大,设置就越大.默认为2402,调到512-1024最佳
innodb_additional_mem_pool_size=4M
#默认为2M
innodb_flush_log_at_trx_commit=1
#设置为0就是等到innodb_log_buffer_size列队满后再统一储存,默认为1
innodb_log_buffer_size=2M
#默认为1M
innodb_thread_concurrency=8
#你的服务器CPU有几个就设置为几,建议用默认一般为8
key_buffer_size=256M
#默认为218,调到128最佳
tmp_table_size=64M
#默认为16M,调到64-256最佳
read_buffer_size=4M
#默认为64K
read_rnd_buffer_size=16M
#默认为256K
sort_buffer_size=32M
#默认为256K
thread_cache_size=120
#默认为60
query_cache_size=32M
※值得注意的是:
很多情况需要具体情况具体分析
一、如果Key_reads太大,则应该把my.cnf中Key_buffer_size变大,保持Key_reads/Key_read_requests至少1/100以上,越小越好。
二、如果Qcache_lowmem_prunes很大,就要增加Query_cache_size的值。
转载自http://blog.csdn.net/xujinyang/article/details/7276945
页:
[1]