本人菜鸟,要对lon文件夹下的20个txt文档进行中文分词,且去停用词,停用词表stopword.txt,运行结果并没有去除停用词,求大神解答代码如下
#encoding=utf-8
import sys
import re
import codecs
import os
import shutil
import jieba
import jieba.analyse
#导入自定义词典
#jieba.load_userdict("dict_baidu.txt")
stopwords = {}.fromkeys([ line.rstrip() for line in open('stopword.txt') ])
#Read file and cut
def read_file_cut():
#create path
path = "lon\"
respath = "lon_Result\"
if os.path.isdir(respath):
shutil.rmtree(respath, True)
os.makedirs(respath)
num = 1
while num<=20:
name = "%d" % num
fileName = path + str(name) + ".txt"
resName = respath + str(name) + ".txt"
source = open(fileName, 'r')
if os.path.exists(resName):
os.remove(resName)
result = codecs.open(resName, 'w', 'utf-8')
line = source.readline()
line = line.rstrip('\n')
while line!="":
line = unicode(line, "utf-8")
seglist = jieba.cut(line,cut_all=False) #精确模式
output = ' '.join(list(seglist)) #空格拼接
for seg in seglist:
seg=seg.encode('gbk')
if seg not in stopwords:
output+=seg
print output
result.write(output + '\r\n')
line = source.readline()
else:
print 'End file: ' + str(num)
source.close()
result.close()
num = num + 1
else:
print 'End All'
#Run function
if name == '__main__':
read_file_cut()