# -*- coding: utf-8 -*-
import os

import jieba
from whoosh.index import create_in, open_dir
from whoosh.fields import *
from whoosh.analysis import RegexAnalyzer
from whoosh.analysis import Tokenizer, Token
from whoosh.compat import text_type
class ChineseTokenizer(Tokenizer):
    """Chinese tokenizer for Whoosh, backed by jieba word segmentation."""

    def __call__(self, value, positions=False, chars=False,
                 keeporiginal=True, removestops=True, start_pos=0, start_char=0,
                 mode='', **kwargs):
        assert isinstance(value, text_type), "%r is not unicode" % value
        t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
        # Segment in jieba's search-engine mode, which also emits shorter
        # sub-words so partial queries can still match.
        list_seg = jieba.cut_for_search(value)
        for w in list_seg:
            t.original = t.text = w
            t.boost = 0.5
            if positions:
                t.pos = start_pos + value.find(w)
            if chars:
                t.startchar = start_char + value.find(w)
                t.endchar = start_char + value.find(w) + len(w)
            yield t
def chinese_analyzer():
    return ChineseTokenizer()
def create_index(document_dir):
    """Build a Whoosh index in the current directory from the files under document_dir."""
    analyzer = chinese_analyzer()
    schema = Schema(title=TEXT(stored=True, analyzer=analyzer),
                    path=ID(stored=True),
                    content=TEXT(stored=True, analyzer=analyzer))
    ix = create_in("./", schema)
    writer = ix.writer()
    for parents, dirnames, filenames in os.walk(document_dir):
        for filename in filenames:
            title = filename.replace(".txt", "").decode('utf8')
            print title
            content = open(os.path.join(document_dir, filename), 'r').read().decode('utf-8')
            path = u"/b"
            writer.add_document(title=title, path=path, content=content)
    writer.commit()
def search(search_str):
    """Query the "content" field and return the titles of matching documents."""
    title_list = []
    ix = open_dir("./")
    searcher = ix.searcher()
    results = searcher.find("content", search_str)
    for hit in results:
        print hit['title']
        print hit.score
        print hit.highlights("content", top=10)
        title_list.append(hit['title'])
    return title_list
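
A minimal usage sketch for the two functions above, assuming a hypothetical ./documents folder of UTF-8 .txt files; the folder name and the query string are illustrative, not part of the original code:

if __name__ == '__main__':
    # Build the index once, then run a keyword query against the "content" field.
    create_index(u"./documents")      # assumed folder of UTF-8 .txt files
    for t in search(u"家居装修"):      # assumed sample query string
        print t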