
[Experience Sharing] Python Spider Example

Posted on 2017-04-29 11:47:59
  


Python Spider Example




Neo Chen (netkiller) <openunix@163.com>









Copyright © 2011 http://netkiller.github.com



Abstract

A Python 3 spider that crawls every URL on a site, with an optional multithreaded mode; with small changes it can be extended with SQL-injection and XSS checks.









Contents

Crawler Example




Crawler Example







Its main function is to crawl all of a site's URLs. With minor modifications you can add SQL-injection checks, cross-site scripting (XSS) checks, and so on (a hypothetical sketch follows the full listing).


#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################
# Home  : http://netkiller.github.com
# Author: Neo <openunix@163.com>
##############################################
from multiprocessing import Pool
from html.parser import HTMLParser
try:
    # HTMLParseError was removed in Python 3.5; keep a stub so the
    # except clause below still works on modern interpreters.
    from html.parser import HTMLParseError
except ImportError:
    class HTMLParseError(Exception):
        pass
import socket, threading, queue
import os, sys, logging
import re
import random, time
import urllib.request, urllib.parse, urllib.error
#from urllib.parse import urlparse

queue = queue.Queue()


class MyHTMLParser(HTMLParser):
    def __init__(self):
        super().__init__()
        # Per-instance list; a class attribute would be shared by every parser.
        self.urls = []

    def handle_starttag(self, tag, attrs):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, collect it.
                if name == "href":
                    if value and value.find('javascript') == -1 and value != '#':
                        self.urls.append(value)

    #def handle_endtag(self, tag):
    #    print("Encountered an end tag:", tag)

    #def handle_data(self, data):
    #    print("Encountered some data:", data)

    def gethref(self):
        return self.urls


class Spider():
    logfile = '/tmp/spider.log'

    def __init__(self, threadname=None):
        self.isdebug = False
        self.depths = 0
        self.link = []      # URLs already visited
        self.unlink = []    # dead links (404 / 302)
        self.referer = ''
        self.useragent = 'Neo spider'
        self.domain = ''
        self.baseurl = ''
        self.skip = []      # duplicate URLs that were skipped
        self.ignore = []    # off-domain URLs that were ignored
        self.threadname = ''
        logging.basicConfig(level=logging.NOTSET,
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            filename=self.logfile,
                            filemode='a')
        self.logging = logging.getLogger()
        if threadname:
            self.threadname = '|' + threadname

    def setDebug(self, isdebug):
        self.isdebug = isdebug
        self.logging.debug('Enable Debug')

    def setDomain(self, tmp):
        if tmp:
            self.domain = tmp

    def setReferer(self, tmp):
        if tmp:
            self.referer = tmp

    def setUseragent(self, tmp):
        if tmp:
            self.useragent = tmp

    def setBaseUrl(self, tmp):
        if tmp:
            self.baseurl = tmp

    def ufilter(self, url):
        # Deduplicate, resolve relative links, and drop off-domain links.
        if url not in self.link:
            self.link.append(url)
        else:
            if url not in self.skip:
                self.skip.append(url)
                self.logging.warning('Skip ' + url)
            return None
        if url[0:1] == '/':
            return self.baseurl + url
        elif url.find('http://') == -1:
            return self.baseurl + '/' + url
        else:
            if url.find(self.domain) == -1:
                if url not in self.ignore:
                    self.logging.warning('Ignore ' + url)
                    self.ignore.append(url)
                return None
        return url

    def working(self, myurl):
        self.depths = self.depths + 1
        if self.depths > 256:
            return
        url = self.ufilter(myurl)
        if url is None:
            return
        else:
            self.link.append(myurl)
        if self.baseurl:
            # Keep baseurl in sync with the host currently being crawled.
            urlparse = urllib.parse.urlparse(url)
            self.setBaseUrl(urlparse.scheme + '://' + urlparse.netloc)
            #self.setDomain(urlparse.netloc)
        try:
            lines = []
            parser = MyHTMLParser()
            req = urllib.request.Request(url)
            req.add_header('User-agent', self.useragent)
            req.add_header('Referer', self.referer)
            response = urllib.request.urlopen(req, timeout=10)
            status = response.status
            reason = response.reason
            headers = response.info()
            log = str(status) + ' ' + reason + ' ' + myurl + ' (' + str(self.depths) + self.threadname + ') '
            if self.isdebug:
                print(log)
            ctype = headers.get('Content-Type', '')
            if ctype.startswith('text/html'):
                if status == 200:
                    body = response.read()
                    parser.feed(body.decode())
                    lines = parser.gethref()
                    self.logging.info(log)
                elif status == 302:
                    self.unlink.append(myurl)
                    self.logging.critical(log)
                else:
                    self.logging.warning(log)
                response.close()
                if lines:
                    self.referer = random.choice(lines)
                    for line in lines:
                        #result = re.match('/http://"(.*)"/"(.*)"/', line)
                        self.working(line)
                    self.depths = self.depths - 1
            else:
                self.logging.warning(log + ' ' + ctype)
                response.close()
        except socket.timeout as e:
            self.logging.error(str(e) + ' ' + myurl)
        except urllib.error.HTTPError as e:
            # HTTPError is a subclass of URLError, so it must be caught first.
            if self.isdebug:
                print(str(e) + ' ' + myurl + ' - ' + url)
            if e.code == 404:
                self.unlink.append(myurl)
            else:
                print(e.code)
            self.logging.critical(str(e) + ' ' + myurl)
        except urllib.error.URLError as e:
            self.logging.critical(str(e) + ' ' + myurl)
        except HTMLParseError as e:
            self.logging.error(str(e) + ' ' + myurl)
            if self.isdebug:
                print(str(e) + ' ' + myurl)
        except UnicodeDecodeError as e:
            self.logging.critical(str(e) + ' ' + myurl)
        except ValueError as e:
            if self.isdebug:
                print(str(e) + ' ' + myurl)


class ThreadSpider(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.spider = Spider(str(self.name))
        self.spider.setDebug(True)

    def run(self):
        while True:
            # grab a host from the queue
            host = self.queue.get()
            self.spider.setDomain(host)
            self.spider.working(host)
            # signal to the queue that the job is done
            self.queue.task_done()


def Multithreading():
    workers = 5
    hosts = ['http://www.example.com/', 'http://brand.example.com/', 'http://list.example.com/', 'http://item.example.com/']
    # spawn a pool of threads and pass them the queue instance
    for i in range(workers):
        t = ThreadSpider(queue)
        t.daemon = True
        t.start()
    # populate the queue with data
    for host in hosts:
        queue.put(host)
    # wait on the queue until everything has been processed
    queue.join()
    #p = Pool(processes=5)
    #p.map(test, [b'http://www.163.com/', b'http://www.sina.com/', b'http://www.qq.com/'])


def test(url):
    spider = Spider()
    spider.setDebug(True)
    spider.working(url)
    print(url)


def daemon(isdaemon):
    # Detach from the terminal (Unix only).
    if isdaemon:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)


def main():
    daemon(isdaemon=False)
    myurl = r'http://www.example.com'
    #myurl = r'http://www.example.com/mobile.html'
    try:
        start = time.time()
        spider = Spider()
        spider.setDebug(True)
        spider.setDomain('www.example.com')
        spider.setBaseUrl(myurl)
        spider.working(myurl)
        print("Elapsed Time: %s" % (time.time() - start))
    except RuntimeError as e:
        print(e)


if __name__ == '__main__':
    try:
        main()
        #Multithreading()
    except KeyboardInterrupt:
        print("Ctrl+C pressed. Shutting down.")
