Commit 61314e57 authored by lishen

[init]

#!/usr/bin/env python3
# coding: utf-8
# File: chatbot_graph.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-4
from py2neo import Graph
from question_classifier import *
from question_parser import *
from answer_search import *
'''Question-answering class'''
class ChatBotGraph:
def __init__(self):
self.g = Graph(
'http://127.0.0.1:7474',
user="neo4j",
password="neo4j")
self.classifier = QuestionClassifier(self.g)
self.parser = QuestionPaser()
self.searcher = AnswerSearcher(self.g)
def chat_main(self, sent):
answer = '您好,我是医药智能助理-小益,希望可以帮到您。如果没答上来,可联系人工客服。祝您身体棒棒!'
res_classify = self.classifier.classify(sent)
if not res_classify:
return answer
res_sql = self.parser.parser_main(res_classify)
final_answers = self.searcher.search_main(res_sql)
if not final_answers:
return answer
else:
return '\n'.join(final_answers)
if __name__ == '__main__':
handler = ChatBotGraph()
    while True:
question = input('用户:')
answer = handler.chat_main(question)
print('小益:', answer)
#!/usr/bin/env python3
# coding: utf-8
# File: build_data.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-3
import pymongo
from lxml import etree
import os
from max_cut import *
class MedicalGraph:
def __init__(self):
self.conn = pymongo.MongoClient()
        cur_dir = os.path.dirname(os.path.abspath(__file__))
self.db = self.conn['medical']
self.col = self.db['data']
first_words = [i.strip() for i in open(os.path.join(cur_dir, 'first_name.txt'))]
        alphabets = list('abcdefghijklmnopqrstuvwxyz')
        nums = list('1234567890')
self.stop_words = first_words + alphabets + nums
self.key_dict = {
'医保疾病' : 'yibao_status',
"患病比例" : "get_prob",
"易感人群" : "easy_get",
"传染方式" : "get_way",
"就诊科室" : "cure_department",
"治疗方式" : "cure_way",
"治疗周期" : "cure_lasttime",
"治愈率" : "cured_prob",
'药品明细': 'drug_detail',
'药品推荐': 'recommand_drug',
'推荐': 'recommand_eat',
'忌食': 'not_eat',
'宜食': 'do_eat',
'症状': 'symptom',
'检查': 'check',
'成因': 'cause',
'预防措施': 'prevent',
'所属类别': 'category',
'简介': 'desc',
'名称': 'name',
'常用药品' : 'common_drug',
'治疗费用': 'cost_money',
'并发症': 'acompany'
}
self.cuter = CutWords()
def collect_medical(self):
cates = []
inspects = []
count = 0
for item in self.col.find():
data = {}
basic_info = item['basic_info']
name = basic_info['name']
if not name:
continue
            # Basic info
data['名称'] = name
data['简介'] = '\n'.join(basic_info['desc']).replace('\r\n\t', '').replace('\r\n\n\n','').replace(' ','').replace('\r\n','\n')
category = basic_info['category']
data['所属类别'] = category
cates += category
inspect = item['inspect_info']
inspects += inspect
attributes = basic_info['attributes']
            # Cause and prevention
data['预防措施'] = item['prevent_info']
data['成因'] = item['cause_info']
            # Symptoms (drop entries whose first character is a stop word)
            data['症状'] = list(set([i for i in item["symptom_info"][0] if i[0] not in self.stop_words]))
for attr in attributes:
attr_pair = attr.split(':')
if len(attr_pair) == 2:
key = attr_pair[0]
value = attr_pair[1]
data[key] = value
            # Inspections: use a local name so the accumulator list above is not clobbered
            inspect_urls = item['inspect_info']
            jcs = []
            for inspect in inspect_urls:
jc_name = self.get_inspect(inspect)
if jc_name:
jcs.append(jc_name)
data['检查'] = jcs
            # Food
food_info = item['food_info']
if food_info:
data['宜食'] = food_info['good']
data['忌食'] = food_info['bad']
data['推荐'] = food_info['recommand']
            # Drugs
drug_info = item['drug_info']
data['药品推荐'] = list(set([i.split('(')[-1].replace(')','') for i in drug_info]))
data['药品明细'] = drug_info
data_modify = {}
for attr, value in data.items():
attr_en = self.key_dict.get(attr)
if attr_en:
data_modify[attr_en] = value
if attr_en in ['yibao_status', 'get_prob', 'easy_get', 'get_way', "cure_lasttime", "cured_prob"]:
data_modify[attr_en] = value.replace(' ','').replace('\t','')
elif attr_en in ['cure_department', 'cure_way', 'common_drug']:
data_modify[attr_en] = [i for i in value.split(' ') if i]
elif attr_en in ['acompany']:
acompany = [i for i in self.cuter.max_biward_cut(data_modify[attr_en]) if len(i) > 1]
data_modify[attr_en] = acompany
try:
                self.db['medical'].insert_one(data_modify)
count += 1
print(count)
except Exception as e:
print(e)
return
def get_inspect(self, url):
res = self.db['jc'].find_one({'url':url})
if not res:
return ''
else:
return res['name']
def modify_jc(self):
for item in self.db['jc'].find():
url = item['url']
content = item['html']
selector = etree.HTML(content)
name = selector.xpath('//title/text()')[0].split('结果分析')[0]
desc = selector.xpath('//meta[@name="description"]/@content')[0].replace('\r\n\t','')
            self.db['jc'].update_one({'url': url}, {'$set': {'name': name, 'desc': desc}})
if __name__ == '__main__':
handler = MedicalGraph()
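    # A hedged usage sketch (calls commented out; the order below is an assumption):
    # modify_jc() backfills names/descriptions in the 'jc' collection, then
    # collect_medical() builds the cleaned 'medical' collection.
    # handler.modify_jc()
    # handler.collect_medical()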
#!/usr/bin/env python3
# coding: utf-8
# File: data_spider.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-3
import urllib.request
import urllib.parse
from lxml import etree
import pymongo
import re
'''Disease data spider based on xywy.com'''
class CrimeSpider:
def __init__(self):
self.conn = pymongo.MongoClient()
self.db = self.conn['medical']
self.col = self.db['data']
    '''Fetch the HTML for a given url'''
def get_html(self, url):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/51.0.2704.63 Safari/537.36'}
req = urllib.request.Request(url=url, headers=headers)
res = urllib.request.urlopen(req)
html = res.read().decode('gbk')
return html
    '''Parse item urls from a listing page'''
def url_parser(self, content):
selector = etree.HTML(content)
urls = ['http://www.anliguan.com' + i for i in selector.xpath('//h2[@class="item-title"]/a/@href')]
return urls
    '''Main spider routine'''
def spider_main(self):
for page in range(1, 11000):
try:
basic_url = 'http://jib.xywy.com/il_sii/gaishu/%s.htm'%page
cause_url = 'http://jib.xywy.com/il_sii/cause/%s.htm'%page
prevent_url = 'http://jib.xywy.com/il_sii/prevent/%s.htm'%page
symptom_url = 'http://jib.xywy.com/il_sii/symptom/%s.htm'%page
inspect_url = 'http://jib.xywy.com/il_sii/inspect/%s.htm'%page
treat_url = 'http://jib.xywy.com/il_sii/treat/%s.htm'%page
food_url = 'http://jib.xywy.com/il_sii/food/%s.htm'%page
drug_url = 'http://jib.xywy.com/il_sii/drug/%s.htm'%page
data = {}
data['url'] = basic_url
data['basic_info'] = self.basicinfo_spider(basic_url)
data['cause_info'] = self.common_spider(cause_url)
data['prevent_info'] = self.common_spider(prevent_url)
data['symptom_info'] = self.symptom_spider(symptom_url)
data['inspect_info'] = self.inspect_spider(inspect_url)
data['treat_info'] = self.treat_spider(treat_url)
data['food_info'] = self.food_spider(food_url)
data['drug_info'] = self.drug_spider(drug_url)
print(page, basic_url)
                self.col.insert_one(data)
except Exception as e:
print(e, page)
return
    '''Parse basic info'''
def basicinfo_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
title = selector.xpath('//title/text()')[0]
category = selector.xpath('//div[@class="wrap mt10 nav-bar"]/a/text()')
desc = selector.xpath('//div[@class="jib-articl-con jib-lh-articl"]/p/text()')
ps = selector.xpath('//div[@class="mt20 articl-know"]/p')
infobox = []
for p in ps:
info = p.xpath('string(.)').replace('\r','').replace('\n','').replace('\xa0', '').replace(' ', '').replace('\t','')
infobox.append(info)
basic_data = {}
basic_data['category'] = category
basic_data['name'] = title.split('的简介')[0]
basic_data['desc'] = desc
basic_data['attributes'] = infobox
return basic_data
    '''Parse treatment info'''
def treat_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
ps = selector.xpath('//div[starts-with(@class,"mt20 articl-know")]/p')
infobox = []
for p in ps:
info = p.xpath('string(.)').replace('\r','').replace('\n','').replace('\xa0', '').replace(' ', '').replace('\t','')
infobox.append(info)
return infobox
    '''Parse drug info'''
def drug_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
drugs = [i.replace('\n','').replace('\t', '').replace(' ','') for i in selector.xpath('//div[@class="fl drug-pic-rec mr30"]/p/a/text()')]
return drugs
    '''Parse food info'''
def food_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
divs = selector.xpath('//div[@class="diet-img clearfix mt20"]')
        try:
            food_data = {}
            food_data['good'] = divs[0].xpath('./div/p/text()')
            food_data['bad'] = divs[1].xpath('./div/p/text()')
            food_data['recommand'] = divs[2].xpath('./div/p/text()')
        except IndexError:
            # fewer than three diet sections on the page
            return {}
return food_data
    '''Parse symptom info'''
def symptom_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
symptoms = selector.xpath('//a[@class="gre" ]/text()')
ps = selector.xpath('//p')
detail = []
for p in ps:
info = p.xpath('string(.)').replace('\r','').replace('\n','').replace('\xa0', '').replace(' ', '').replace('\t','')
detail.append(info)
        # Return the pair directly; collect_medical reads symptom_info[0] for the symptom list
        return symptoms, detail
    '''Parse inspection info'''
def inspect_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
inspects = selector.xpath('//li[@class="check-item"]/a/@href')
return inspects
    '''Generic paragraph parser'''
def common_spider(self, url):
html = self.get_html(url)
selector = etree.HTML(html)
ps = selector.xpath('//p')
infobox = []
for p in ps:
info = p.xpath('string(.)').replace('\r', '').replace('\n', '').replace('\xa0', '').replace(' ','').replace('\t', '')
if info:
infobox.append(info)
return '\n'.join(infobox)
    '''Inspection page crawler'''
def inspect_crawl(self):
for page in range(1, 3685):
try:
url = 'http://jck.xywy.com/jc_%s.html'%page
html = self.get_html(url)
data = {}
data['url']= url
data['html'] = html
                self.db['jc'].insert_one(data)
print(url)
except Exception as e:
print(e)
if __name__ == '__main__':
    handler = CrimeSpider()
    # spider_main() crawls the disease pages; inspect_crawl() fetches inspection pages
    handler.inspect_crawl()
#!/usr/bin/env python3
# coding: utf-8
# File: max_cut.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-3-26
class CutWords:
def __init__(self):
dict_path = './disease.txt'
self.word_dict, self.max_wordlen = self.load_words(dict_path)
    # Load the word dictionary
    def load_words(self, dict_path):
        # a set gives O(1) membership tests during matching
        words = set()
        max_len = 0
        for line in open(dict_path):
            wd = line.strip()
            if not wd:
                continue
            max_len = max(max_len, len(wd))
            words.add(wd)
        return words, max_len
    # Maximum forward matching
    def max_forward_cut(self, sent):
        # 1. Take the next m characters of the sentence as the match field, m being the longest dictionary entry length.
        # 2. Look the field up in the dictionary; on a hit, cut it off as one word.
cutlist = []
index = 0
while index < len(sent):
matched = False
for i in range(self.max_wordlen, 0, -1):
cand_word = sent[index: index + i]
if cand_word in self.word_dict:
cutlist.append(cand_word)
matched = True
break
            # No match: cut off a single character
if not matched:
i = 1
cutlist.append(sent[index])
index += i
return cutlist
    # Maximum backward matching
    def max_backward_cut(self, sent):
        # 1. Take the previous m characters of the sentence as the match field, m being the longest dictionary entry length.
        # 2. Look the field up in the dictionary; on a hit, cut it off as one word.
cutlist = []
index = len(sent)
        while index > 0:
            matched = False
            # try the longest candidate first, clamping so the slice never runs past the sentence start
            for i in range(min(self.max_wordlen, index), 0, -1):
                cand_word = sent[index - i: index]
                # on a dictionary hit, cut the candidate off as one word
                if cand_word in self.word_dict:
                    cutlist.append(cand_word)
                    index -= i
                    matched = True
                    break
            # no match: cut off a single character
            if not matched:
                cutlist.append(sent[index - 1])
                index -= 1
        return cutlist[::-1]
    # Bidirectional maximum matching
    def max_biward_cut(self, sent):
        # Compare the forward and backward segmentations and pick between them.
        # Heuristics:
        # 1. If the word counts differ, return the segmentation with fewer words.
        # 2. If the counts are equal: a) identical results mean no ambiguity, return either;
        #    b) otherwise return the one with fewer single-character words.
forward_cutlist = self.max_forward_cut(sent)
backward_cutlist = self.max_backward_cut(sent)
count_forward = len(forward_cutlist)
count_backward = len(backward_cutlist)
def compute_single(word_list):
num = 0
for word in word_list:
if len(word) == 1:
num += 1
return num
if count_forward == count_backward:
if compute_single(forward_cutlist) > compute_single(backward_cutlist):
return backward_cutlist
else:
return forward_cutlist
elif count_backward > count_forward:
return forward_cutlist
else:
return backward_cutlist
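# A minimal usage sketch, assuming disease.txt is present in the working
# directory; the sample sentence is hypothetical.
if __name__ == '__main__':
    cuter = CutWords()
    sent = '高血压患者不能吃什么'
    print(cuter.max_forward_cut(sent))
    print(cuter.max_backward_cut(sent))
    print(cuter.max_biward_cut(sent))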
import base64
from gmssl import sm4
class CryptSM4:
def __init__(self, key):
        self.crypt_sm4 = sm4.CryptSM4()  # instantiate the gmssl SM4 cipher
self.key = key
def encryptSM4(self, value):
crypt_sm4 = self.crypt_sm4
        crypt_sm4.set_key(self.key.encode(), sm4.SM4_ENCRYPT)  # set the key for encryption
        data_str = str(value)
        encrypt_value = crypt_sm4.crypt_ecb(data_str.encode())  # ECB encryption; returns bytes
        return base64.b64encode(encrypt_value).decode()
def decryptSM4(self, encrypt_value):
crypt_sm4 = self.crypt_sm4
        crypt_sm4.set_key(self.key.encode(), sm4.SM4_DECRYPT)  # set the key for decryption
        decrypt_value = crypt_sm4.crypt_ecb(base64.b64decode(encrypt_value))  # ECB decryption; returns bytes
return decrypt_value.decode()
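# A minimal round-trip sketch; the 16-byte key and plaintext below are
# hypothetical placeholders (gmssl's SM4 requires a 16-byte key). ECB mode,
# as used by the class above, repeats patterns across identical blocks, so
# this is for illustration only.
if __name__ == '__main__':
    sm4_handler = CryptSM4('0123456789abcdef')
    cipher_text = sm4_handler.encryptSM4('patient-id-001')
    print(cipher_text)
    print(sm4_handler.decryptSM4(cipher_text))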
from enum import Enum
class word_type_enum(Enum):
DISEASE = 'disease'
DEPARTMENT = 'department'
CHECK = 'check'
SYMPTOM = 'symptom'
PRODUCER = 'producer'
FOOD = 'food'
FOOD_MATERIAL = 'foodMaterial'
FOOD_PACKAGE = 'foodPackage'
FOOD_PROCESS = 'foodProcess'
COOKING_STYLE = 'cookingStyle'
INDEX_LEVEL = 'indexLevel'
PROVINCE = 'province'
NUTRIENT = 'nutrient'
EXERCISE = 'exercise'
DRUG = 'drug'
class question_type_enum(Enum):
OTHERS = 'others'
DISEASE_SYMPTOM = 'disease_symptom'
SYMPTOM_DISEASE = 'symptom_disease'
DISEASE_CAUSE = 'disease_cause'
DISEASE_ACOMPANY = 'disease_acompany'
DISEASE_NOT_FOOD = 'disease_not_food'
DISEASE_DO_FOOD = 'disease_do_food'
FOOD_NOT_DISEASE = 'food_not_disease'
FOOD_DO_DISEASE = 'food_do_disease'
DISEASE_DRUG = 'disease_drug'
DRUG_DISEASE = 'drug_disease'
DISEASE_CHECK = 'disease_check'
CHECK_DISEASE = 'check_disease'
DISEASE_PREVENT = 'disease_prevent'
DISEASE_LASTTIME = 'disease_lasttime'
DISEASE_CUREWAY = 'disease_cureway'
DISEASE_CUREPROB = 'disease_cureprob'
DISEASE_EASYGET = 'disease_easyget'
DISEASE_DESC = 'disease_desc'
FOODMATERIAL_NOT_CROWD = 'foodMaterial_not_crowd'
FOODMATERIAL_DO_CROWD = 'foodMaterial_do_crowd'
FOODPROCESS_NOT_CROWD = 'foodProcess_not_crowd'
FOODPROCESS_DO_CROWD = 'foodProcess_do_crowd'
FOODPROCESS_LEVEL_HIGH = 'foodProcess_level_high'
FOODPROCESS_LEVEL_MID = 'foodProcess_level_mid'
FOODPROCESS_LEVEL_LOW = 'foodProcess_level_low'
FOODMATERIAL_LEVEL_HIGH = 'foodMaterial_level_high'
FOODMATERIAL_LEVEL_MID = 'foodMaterial_level_mid'
FOODMATERIAL_LEVEL_LOW = 'foodMaterial_level_low'
INDEX_LEVEL_HIGH = 'index_level_high'
INDEX_LEVEL_MID = 'index_level_mid'
INDEX_LEVEL_LOW = 'index_level_low'
FOODPROCESS_LEVEL_CONTENT = 'foodProcess_level_content'
FOODMATERIAL_LEVEL_CONTENT = 'foodMaterial_level_content'
FOODPROCESS_COOKING_SPECIALTY = 'foodProcess_cooking_specialty'
FOODPROCESS_FOOD_SPECIALTY = 'foodProcess_food_specialty'
FOODMATERIAL_COOKING_SPECIALTY = 'foodMaterial_cooking_specialty'
FOODMATERIAL_FOOD_SPECIALTY = 'foodMaterial_food_specialty'
FOODPROCESS_PROVINCE = 'foodProcess_province'
FOODMATERIAL_PROVINCE = 'foodMaterial_province'
PROVINCE_FOOD = 'province_food'
FOODPROCESS_UNIT = 'foodProcess_unit'
FOODMATERIAL_UNIT = 'foodMaterial_unit'
FOODPROCESS_HIGH_CONT = 'foodProcess_high_cont'
FOODMATERIAL_HIGH_CONT = 'foodMaterial_high_cont'
NUTRIENT_HIGH_CONT = 'nutrient_high_cont'
FOODPROCESS_LOW_CONT = 'foodProcess_low_cont'
FOODMATERIAL_LOW_CONT = 'foodMaterial_low_cont'
NUTRIENT_LOW_CONT = 'nutrient_low_cont'
FOODPROCESS_ALIAS = 'foodProcess_alias'
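# A minimal sketch of how these enums might be consumed (illustrative only):
if __name__ == '__main__':
    print(word_type_enum.DISEASE.value)           # 'disease'
    print(question_type_enum('disease_symptom'))  # question_type_enum.DISEASE_SYMPTOM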