import requests
import wordcloud
import jieba
from bs4 import BeautifulSoup
from matplotlib import pyplot as plt
from pylab import mpl
# use SimHei so Chinese labels render correctly
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
url = 'https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6'
try:
    # fetch the hot-search page
    r = requests.get(url)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    soup = BeautifulSoup(r.text, 'html.parser')
    data = soup.find_all('a')
    d_list = []
    for item in data:
        d_list.append(item.text)
    # drop the navigation links at the head and the footer links at the tail;
    # the slice bounds are specific to this page's layout
    words = d_list[4:-11]
    # Chinese word segmentation with jieba
    result = list(jieba.cut(words[0]))
    for word in words[1:]:
        result.extend(jieba.cut(word))
    # keep only tokens longer than one character
    redata = []
    for it in result:
        if len(it) > 1:
            redata.append(it)
    result_str = ' '.join(redata)
    # render the word cloud (SimHei ships with Windows; point font_path
    # at any CJK font on other systems)
    font = r'C:\Windows\Fonts\simhei.ttf'
    w = wordcloud.WordCloud(font_path=font, width=600, height=400)
    w.generate(result_str)
    w.to_file('微博热搜关键词词云.png')
    # count each distinct keyword, keeping those that appear more than once
    key = list(set(redata))
    x, y = [], []
    for st in key:
        count = redata.count(st)
        if count > 1:
            x.append(st)
            y.append(count)
    # sort the keyword/count pairs together by count; sorting x and y
    # independently would break the pairing between keywords and counts
    pairs = sorted(zip(x, y), key=lambda p: p[1])
    x = [p[0] for p in pairs]
    y = [p[1] for p in pairs]
    # plot the frequency curve
    plt.plot(x, y)
    plt.show()
except Exception as e:
    print(e)
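
In practice, s.weibo.com often serves a login page or an empty list to clients that send no browser headers, so soup.find_all('a') may come back nearly empty. If that happens, passing a User-Agent (and a timeout) with the request usually helps; a minimal sketch, where the header value is a placeholder to replace with a real browser string:

# hypothetical header value; substitute your own browser's User-Agent,
# and add a logged-in cookie if Weibo still redirects to a login page
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
r = requests.get(url, headers=headers, timeout=10)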
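
One presentation note: plt.plot draws a line through categorical keywords, which can be hard to read. A horizontal bar chart of the same x/y data is often clearer; a minimal sketch reusing the variables from the script above:

plt.barh(x, y)       # one bar per keyword, bar length = frequency
plt.xlabel('词频')   # word frequency
plt.tight_layout()
plt.show()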