Write comparison output to summary

xiaobulu27 2023-08-24 17:07:18 +08:00
parent 32e0934792
commit 9a8afacbfb
1 changed file with 58 additions and 41 deletions

main.py

@@ -1,11 +1,11 @@
 import pandas as pd
 import os
 import html2text
-import sys
 current_dir = os.getcwd()
 wechat_dir = os.path.join(current_dir, 'wechat_dir')
 web_dir = os.path.join(current_dir, 'web_dir')
+output_dir = os.path.join(current_dir, 'summary')

 df_s = pd.read_excel('biao.xlsx', sheet_name='筛查内容')

 def trans_to_json():
@@ -14,9 +14,8 @@ def trans_to_json():
         f.write(json_str)

 def make_wechat_articles_full():
-    df = pd.read_csv(os.path.join(wechat_dir, 'articles.csv'), encoding='gb18030')
+    df = pd.read_excel(os.path.join(wechat_dir, 'articles.xlsx'))
     df['content'] = ''
-    ind = 0
     for ind, row in df.iterrows():
         full_path = os.path.join(wechat_dir, row['nickname'], row['id'] + '.html')
         try:
@@ -27,7 +26,6 @@ def make_wechat_articles_full():
             print(f'{ind}--{row["nickname"]}--{row["title"]}')
         except:
             print(full_path + '---不存在')
-        ind +=1

     output_path = os.path.join(wechat_dir, 'articles_full.csv')
     df.to_csv(output_path, encoding='utf-8_sig')
@@ -35,56 +33,75 @@ def ana_wechat():
     articles_full_path = os.path.join(wechat_dir, 'articles_full.csv')
     if not os.path.exists(articles_full_path):
         make_wechat_articles_full()
-    df_a = pd.DataFrame(columns = ['公众号', '标题', '地址', '错误表述', '建议修改词语', '错误分类'])
     df = pd.read_csv(articles_full_path)
     df['content'] = df['content'].fillna('')
-    ind = 0
-    need_save = False
+
+    output_data = []
+    index = 1
     for ind, row in df_s.iterrows():
         mask = df['content'].str.contains(row['错误表述'])
         result = df[mask]
-        if result.empty:
-            continue
-        ind2 = 0
-        for ind2, row2 in result.iterrows():
-            alist = [row2['nickname'], row2['title'], row2['content_url'], row['错误表述'], row['建议修改词语'], row['错误分类']]
-            print(alist)
-            df_a.loc[len(df_a.index)] = alist
-            if need_save is False:
-                need_save = True
-            ind2 +=1
-        ind +=1
-    if need_save:
-        df_a.to_csv('ana_wechat.csv', encoding='utf-8_sig')
+
+        if not result.empty:
+            for ind2, row2 in result.iterrows():
+                output_row = [
+                    index,
+                    row2['nickname'],
+                    row2['title'],
+                    row['错误表述'],
+                    row['建议修改词语'],
+                    row['错误分类'],
+                    row2['content_url']
+                ]
+                output_data.append(output_row)
+                index += 1
+    output_data.insert(0, ['序号', '信源名称', '文章标题', '错误表述', '建议修改词语', '错误分类', '原文链接'])
+    return output_data

 def ana_web():
-    df_a = pd.DataFrame(columns = ['单位', '主办', '地址', '错误表述', '建议修改词语', '错误分类'])
-    need_save = False
+    output_data = []
+    index = 1
     for file in os.listdir(web_dir):
         full_path = os.path.join(web_dir, file)
         if os.path.getsize(full_path) > 0:
             df = pd.read_excel(os.path.join(web_dir, file))
-            ind = 0
             for ind, row in df_s.iterrows():
                 mask = df['text'].str.contains(row['错误表述'], na=False)
                 result = df[mask]
-                if result.empty:
-                    continue
-                ind2 = 0
-                for ind2, row2 in result.iterrows():
-                    alist = [row2['group'], row2['name'], row2['url'], row['错误表述'], row['建议修改词语'], row['错误分类']]
-                    print(alist)
-                    df_a.loc[len(df_a.index)] = alist
-                    if need_save is False:
-                        need_save = True
-                    ind2 +=1
-                ind +=1
-    if need_save:
-        df_a.to_csv('ana_web.csv', encoding='utf-8_sig')
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1 and sys.argv[1] == 'wechat':
-        ana_wechat()
-    else:
-        ana_web()
+                if not result.empty:
+                    for ind2, row2 in result.iterrows():
+                        output_row = [
+                            index,
+                            row2['name'],
+                            "文章标题",
+                            row['错误表述'],
+                            row['建议修改词语'],
+                            row['错误分类'],
+                            row2['content_url']
+                        ]
+                        output_data.append(output_row)
+                        index += 1
+    output_data.insert(0, ['序号', '信源名称', '文章标题', '错误表述', '建议修改词语', '错误分类', '原文链接'])
+    return output_data
+
+# Run WeChat Analysis
+wechat_results = ana_wechat()
+
+# Run Web Content Analysis
+web_results = ana_web()
+
+# Save results in an Excel file with two sheets
+output_excel_path = os.path.join(output_dir, '总院及下属公司官方公众号巡查结果汇总表.xlsx')
+
+with pd.ExcelWriter(output_excel_path) as writer:
+    wechat_results.to_excel(writer, sheet_name='公众号', index=False)
+    web_results.to_excel(writer, sheet_name='网站', index=False)
+
+print("Analysis completed and results saved to Excel.")