# -*- coding: cp936 -*-
import jieba
import jieba.posseg as pseg
import os
import sys
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(r"C:\Users\Administrator\Desktop\9.17")
from numpy import *
fr = open('exercise.txt')
fr_list = fr.read()
dataList = fr_list.split('\n')
data = []
for oneline in dataList:
    data.append(" ".join(jieba.cut(oneline)))
# Convert the segmented words into a term-frequency matrix
freWord = CountVectorizer()
# Compute the tf-idf weight of each word
transformer = TfidfTransformer()
# The inner fit_transform builds the word-count matrix; the outer one converts it into tf-idf weights
tfidf = transformer.fit_transform(freWord.fit_transform(data))
# Get all words in the bag-of-words model
word = freWord.get_feature_names()
# The weight matrix: one row per document, one column per word
weight = tfidf.toarray()
tfidfDict = {}
for i in range(len(weight)):
    for j in range(len(word)):
        getWord = word[j]
        getValue = weight[i][j]
        if getValue != 0:
            if tfidfDict.has_key(getWord):
                tfidfDict[getWord] += getValue
            else:
                tfidfDict.update({getWord: getValue})
sorted_tfidf = sorted(tfidfDict.iteritems(),
                      key=lambda d: d[1], reverse=True)
fw = open('result.txt', 'w')
for i in sorted_tfidf:
    fw.write(i[0] + '\t' + str(i[1]) + '\n')
fw.close()
At this point we have a simple implementation of the algorithm; the next step is to apply it to document clustering.
4. Implementing simple text clustering
For clustering, the crucial question is what we are clustering on. Following the analysis above, we can map each article's keywords to their tf-idf values, which naturally suggests a dict. Since we cluster more than one article, we build such a keyword-to-weight dict for every article and then merge them all before clustering; the merged result is, again, a dict.
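In other words, the input to the clustering step is a dict of dicts, roughly like this (article names and weights here are made up for illustration):

# One entry per article: {article name: {keyword: tf-idf weight}}
rows = {
    'article1': {'keyword1': 0.42, 'keyword2': 0.31},
    'article2': {'keyword3': 0.38, 'keyword1': 0.25},
}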
Building on the tf-idf implementation above, each article's result can be saved as a separate .txt file in one directory; we then read the whole directory back and merge everything. A sketch of the writing step comes first, followed by the reading code:
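The original code above only writes a single result.txt, so here is a minimal sketch of the per-article writing step, assuming each article has already been reduced to a {keyword: weight} dict as shown earlier (the function name and directory layout are hypothetical); every output line is "keyword<TAB>weight", which is exactly the format readfile below expects:

import codecs

# Hypothetical helper (not in the original post): write each article's tf-idf dict to its own file
def write_results(rows, dirname):
    # rows: {article name: {keyword: tf-idf weight}}
    for name, tw_dict in rows.items():
        fw = codecs.open(dirname + name + '.txt', 'w', encoding='utf-8')
        for token, w in tw_dict.items():
            fw.write(token + '\t' + str(w) + '\n')
        fw.close()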
# -*- coding: utf-8 -*-
from PIL import Image,ImageDraw
import os, codecs, random
from math import sqrt
# Store the per-article results as dicts; rows_norms keeps each article's vector norm
rows_norms = {}
def readfile(dirname):
    rows = {}
    for f in os.listdir(dirname):  # one tf-idf result file per article
        fr = codecs.open(dirname + f, 'r', encoding='utf-8')
        tw_dict = {}
        norm = 0
        for line in fr:
            items = line.split('\t')
            token = items[0].strip()
            if len(token) < 2:
                continue
            w = float(items[1].strip())
            norm += w ** 2
            tw_dict[token] = w
        rows[str(f[:-4])] = tw_dict
        rows_norms[str(f[:-4])] = sqrt(float(norm))
    # print len(rows)
    return rows
At this point we effectively have our data. What remains is the k-means implementation itself; earlier posts cover it in detail, so it is not repeated here. The one difference is that cosine distance is used to measure similarity:
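Concretely, for two keyword-weight vectors v1 and v2 the value computed below is

    distance = 1 - (v1 · v2) / (||v1|| * ||v2||)

i.e. one minus the cosine similarity, so two articles pointing in the same direction get distance 0 and articles with no shared keywords get distance 1.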
# Cosine distance, where v1 is an article row and v2 is a cluster centre
def cosine(v1, norm_v1, v2, norm_v2):
    if norm_v1 == 0 or norm_v2 == 0:
        return 1.0
    dividend = 0
    # Dot product over the keywords the two vectors share
    for k, v in v1.items():
        if k in v2:
            dividend += v * v2[k]
    return 1.0 - dividend / (norm_v1 * norm_v2)
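A quick sanity check of the distance on two made-up keyword vectors (values are hypothetical):

a = {'data': 1.0, 'cluster': 2.0}
b = {'cluster': 2.0, 'text': 1.0}
norm_a = sqrt(1.0 ** 2 + 2.0 ** 2)   # sqrt is imported above
norm_b = sqrt(2.0 ** 2 + 1.0 ** 2)
print cosine(a, norm_a, b, norm_b)   # only 'cluster' overlaps: 1 - 4/5 = 0.2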
The main routine is as follows:
# The clustering algorithm itself
def kcluster(rows, distance=cosine, k=3):
    # rows_range, random_vec, norm and center come from the earlier k-means posts
    ranges = rows_range(rows)
    # Initialise the cluster centres randomly
    clusters = []
    for i in range(k):
        clusters.append(random_vec(ranges))
    clusters_norm = []
    for i in range(k):
        clusters_norm.append(norm(clusters[i]))
    lastmatches = None
    # Start iterating
    for t in range(300):
        print 'iteration %d' % t
        bestmatches = [[] for i in range(k)]
        # Assign every article to its nearest centre
        for j in rows.keys():
            row = rows[j]
            row_norm = rows_norms[j]
            bestmatch = 0
            min_dis = 10000000
            for i in range(k):
                d = distance(row, row_norm, clusters[i], clusters_norm[i])
                if d < min_dis:
                    bestmatch = i
                    min_dis = d
            bestmatches[bestmatch].append(j)
        # Stop once the assignment no longer changes
        if bestmatches == lastmatches:
            break
        lastmatches = bestmatches
        # Recompute each centre (and its norm) from its members
        for i in range(k):
            clusters[i] = center(bestmatches[i], rows)
            clusters_norm[i] = norm(clusters[i])
    print bestmatches
    return bestmatches
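kcluster relies on four helpers (rows_range, random_vec, norm and center) that the earlier k-means posts define but this excerpt does not show. Purely as a reading aid, here is one plausible sketch of them for dict-based keyword vectors; the versions in the original posts may differ:

# Sketch only: plausible stand-ins for the helpers referenced above, not the originals.

# Min/max weight observed for every keyword across all articles
def rows_range(rows):
    ranges = {}
    for tw_dict in rows.values():
        for token, w in tw_dict.items():
            lo, hi = ranges.get(token, (w, w))
            ranges[token] = (min(lo, w), max(hi, w))
    return ranges

# A random dict vector drawn inside those ranges
def random_vec(ranges):
    return dict((token, lo + random.random() * (hi - lo))
                for token, (lo, hi) in ranges.items())

# Euclidean norm of a dict vector
def norm(v):
    return sqrt(sum(w ** 2 for w in v.values()))

# Mean vector of the articles assigned to one cluster
def center(members, rows):
    c = {}
    if not members:
        return c
    for name in members:
        for token, w in rows[name].items():
            c[token] = c.get(token, 0.0) + w
    for token in c:
        c[token] /= len(members)
    return c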
#test
if __name__ == '__main__':
    corpus_dir = 'D:/python2.7/exercise/clusting/data/'
    rows = readfile(corpus_dir)
    print 'create vectorspace'
    n = 3
    clust = kcluster(rows, k=n)
A quick test gives fairly reasonable results, but the k-means optimisations discussed earlier could still be applied here to get better clusters.