# coding=utf-8  ## source file is stored as UTF-8
# author : junzi
# date : 10_04_20
# Thanks to the following websites:
# http://wiki.ubuntu.org.cn/Python%E6%AD%A3%E5%88%99%E8%A1%A8%E8%BE%BE%E5%BC%8F%E6%93%8D%E4%BD%9C%E6%8C%87%E5%8D%97
# http://www.panopticon.jp/blog/2007/11/182337.html
# http://www.crummy.com/software/BeautifulSoup/documentation.zh.html#Searching%20Within%20the%20Parse%20Tree
# http://doc.chinahtml.com/Manual/Python/tut/tut.html
# http://www.python.jp/doc/2.5/lib/csv-examples.html
# http://hi.baidu.com/daping_zhang/blog/item/09dda71ea9d7d21f4134173e.html
# http://hi.baidu.com/javalang/blog/item/84bac4bf731fb80f18d81fe1.html
# http://www.cnblogs.com/sislcb/archive/2008/12/15/1355481.html
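#
# This script scrapes wedding-venue listings from wedding-mnavi.com:
# it collects the region links on the homepage, walks each region's
# listing page, pulls out each venue's name, address, telephone number
# and prefecture/city/town, and writes the rows to my.csv.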
from BeautifulSoup import BeautifulSoup
import re
import urllib2
import csv
import sys
# writeContent (initialized below, before the crawl) collects the parsed rows.
# Process one region's listing page.
def checkSub(suburl):
    # Fetch the sub page
    response = urllib2.urlopen(suburl)
    html = response.read()
    # Parse the page content
    subsoup = BeautifulSoup(html)
    # Every venue entry lives in a div.main_cont block
    leftconts = subsoup.findAll("div", {"class": "main_cont"})
    checkTag(leftconts)
    # Follow the "next page" (次へ) link, if any
    if hasNext(subsoup):
        suburl = getNext(subsoup)
        print suburl
        # checkSub(suburl)  # uncomment to recurse through all pages
# Return the absolute URL behind the "next page" (次へ) link.
def getNext(subsoup):
    pager = subsoup.findAll("div", {"class": "left_pager"})[0]
    lis = pager.findAll("li")
    nextLi = lis[-1]
    # findAll() returns a list, so test its length; the original compared
    # the list itself against 0, which is always true.
    if len(nextLi.findAll("a")) != 0:
        result = nextLi.findAll("a")[0]["href"]
        return homepage + "/" + result
# Does the page contain a pager ("next" links) at all?
def hasNext(subsoup):
    return len(subsoup.findAll("div", {"class": "left_pager"})) != 0
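# Pager markup, as inferred from the selectors above (an assumption, not
# verified against the live site):
#   <div class="left_pager"><ul> ... <li><a href="...">次へ</a></li></ul></div>
# i.e. the final <li> carries the "next page" anchor when more pages exist.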
# Parse a list of venue blocks and append each venue's fields to writeContent.
def checkTag(leftconts):
    for leftcont in leftconts:
        # Skip blocks that carry no region links (not a venue entry)
        if len(leftcont.findAll("div", {"class": "ul_link_region"})) == 0:
            continue
        # Venue name
        namae = leftcont.findAll("div", {"class": "dl_area"})[0].dl.dt.a.string.encode('shift-jis')
        print namae
        # Street address
        adoresu = leftcont.findAll("div", {"class": "ul_area"})[0].ul.li.string.encode('shift-jis')
        print adoresu
        # Telephone number
        tele = leftcont.findAll("span", {"class": "c_g"})[0].string.encode('shift-jis')
        print tele
        # Region links: prefecture, city, town
        jimei = leftcont.findAll("div", {"class": "ul_link_region"})[0].findAll("a")
        genna = jimei[0].string.encode('shift-jis')
        print genna
        shina = jimei[1].string.encode('shift-jis')
        print shina
        machina = jimei[2].string.encode('shift-jis')
        print machina
        # Collect one CSV row
        result = [namae, adoresu, tele, genna, shina, machina]
        writeContent.append(result)
        print "==================================="
# Write all collected rows to my.csv.
def writeToFile():
    # 'wb' keeps the csv module from inserting blank lines on Windows (Python 2)
    f = open('my.csv', 'wb')
    writer = csv.writer(f)
    writer.writerow(['Name', 'Address', 'Telephone', 'Prefecture', 'City', 'Town'])
    for line in writeContent:
        writer.writerow(line)
    f.close()
# !!!! Entry point !!!!
# Homepage
homepage = "http://www.wedding-mnavi.com"
# Fetch the page
response = urllib2.urlopen(homepage)
html = response.read()
# Build a soup object
soup = BeautifulSoup(html)
# The div that holds the region links
subplaces = soup.findAll("div", {"class": "gnavi_area"})
temp = subplaces[0]
soupsp = BeautifulSoup(str(temp))
# Region page URLs
placenamelist = []
# ****** The shared list that collects the parsed rows
writeContent = []
# The homepage lists `total` regions to parse
total = len(soupsp('a'))
# Visit every region (the original looped over range(1), which only
# covered the first region; range(total) matches the stated intent)
for i in range(total):
    # Build each region's absolute URL
    placenamelist.append(homepage + "/" + soupsp.findAll("a", {"href": "address2.php?state=" + str(i + 1)})[0]['href'])
# Parse each region in turn
for suburl in placenamelist:
    # Go!!! (進め!!!)
    checkSub(suburl)
# Write the results to disk
writeToFile()
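
# Usage note (assumptions: Python 2 with BeautifulSoup 3 installed; the
# script is saved as, e.g., scrape_wedding_mnavi.py):
#   python scrape_wedding_mnavi.py
# The parsed rows land in my.csv in the current working directory.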