from bs4 import BeautifulSoup #网页解析,获取数据
import re #正则表达式,进行文字匹配
import urllib.request,urllib.error #制定URL,获取网页数据
import xlwt #进行excel操作
import sqlite3 #进行SQLite数据库操作
def main():
    """Crawl the Douban Top 250 pages and print each movie's detail link."""
    base_url = 'https://movie.douban.com/top250?start='
    # BUG FIX: the original assigned ``fike = re.compile(...)`` as a *local*
    # of main(), and only AFTER getat() had already run -- getat() looks the
    # pattern up as a global, so that local could never reach it and parsing
    # any item raised NameError.  getat() now owns its own compiled pattern,
    # so main() only kicks off the crawl.  (The unused ``url1`` binding is
    # dropped as well; getat() returns nothing useful.)
    getat(base_url)
def getat(baserl):
    """Fetch all 10 Top-250 list pages and print every movie's detail link.

    baserl: page-URL prefix; the 0-based item offset (0, 25, ..., 225)
    is appended to form each page URL.
    """
    # Compile once, hoisted out of both loops.  BUG FIX: the original read a
    # global ``fike`` that only ever existed as a local inside main(), which
    # raised NameError the first time an item was parsed.
    link_pat = re.compile(r'<a href="(.*?)">')
    for page in range(10):
        url = baserl + str(25 * page)
        html = gat(url)
        soup = BeautifulSoup(html, 'html.parser')
        # soup('div', ...) is shorthand for soup.find_all('div', ...)
        for item in soup('div', class_='item'):
            links = link_pat.findall(str(item))
            # Guard the empty-match case instead of IndexError-ing on [0].
            if links:
                print(links[0])
def gat(url):
    """Download *url* and return its body decoded as UTF-8.

    Returns an empty string when the request fails; the error is printed
    rather than raised so the crawl can continue (best-effort).
    """
    head = {
        # Pretend to be a desktop browser -- Douban rejects the default
        # urllib User-Agent.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48"
    }
    request = urllib.request.Request(url, headers=head)
    html = ''
    try:
        response = urllib.request.urlopen(request)
        # BUG FIX: ``bytes`` has no ``docode`` method -- the typo made every
        # page "fail", and the bare ``except:`` printing '14' hid the real
        # AttributeError.
        html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        # Narrow, informative error handling instead of a bare except.
        print(e)
    return html
# Script entry point: run the crawl, then report completion ("爬完" =
# "crawl finished").
if __name__ == '__main__':
    main()
    print('爬完')
简单的except报错问题,此代码一直报错无法正常获取网址
- 写回答
- 好问题 0 提建议
- 追加酬金
- 关注问题
- 邀请回答
-
2条回答 默认 最新
- 一只爱编程的书虫 2021-09-20 19:15关注
分析过程:
使用以下代码,可以追踪错误信息。from bs4 import BeautifulSoup #网页解析,获取数据 import re #正则表达式,进行文字匹配 import urllib.request,urllib.error #制定URL,获取网页数据 import xlwt #进行excel操作 import sqlite3 #进行SQLite数据库操作 def main(): baserl = 'https://movie.douban.com/top250?start=' url1 = getat(baserl) fike = re.compile(r'<a href="(.*?)">') def getat(baserl): for i in range(0,10): url = baserl+str(25*i) html = gat(url) soup = BeautifulSoup(html,'html.parser') for item in soup('div',class_='item'): item = str(item) save = [] like = re.findall(fike,item)[0] print(like) def gat(url): # global html head = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48" ''} a =urllib.request.Request(url,headers=head) html = '' try: response = urllib.request.urlopen(a) html = response.read().docode('utf-8') except Exception as e: print(e) return html if __name__ == '__main__': main() print('爬完')
输出:
'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 'bytes' object has no attribute 'docode' 爬完
一看就知道是打错了。
改正后代码:from bs4 import BeautifulSoup #网页解析,获取数据 import re #正则表达式,进行文字匹配 import urllib.request,urllib.error #制定URL,获取网页数据 import xlwt #进行excel操作 import sqlite3 #进行SQLite数据库操作 def main(): baserl = 'https://movie.douban.com/top250?start=' url1 = getat(baserl) fike = re.compile(r'<a href="(.*?)">') def getat(baserl): for i in range(0,10): url = baserl+str(25*i) html = gat(url) soup = BeautifulSoup(html,'html.parser') for item in soup('div',class_='item'): item = str(item) save = [] like = re.findall(fike,item)[0] print(like) def gat(url): # global html head = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36 Edg/91.0.864.48" ''} a =urllib.request.Request(url,headers=head) html = '' try: response = urllib.request.urlopen(a) html = response.read().decode('utf-8') except Exception as e: print(e) return html if __name__ == '__main__': main() print('爬完')
本人实测可正常执行。
本回答被题主选为最佳回答 , 对您是否有帮助呢?解决 无用评论 打赏 举报
悬赏问题
- ¥15 关于#java#的问题:找一份能快速看完mooc视频的代码
- ¥15 这种微信登录授权 谁可以做啊
- ¥15 请问我该如何添加自己的数据去运行蚁群算法代码
- ¥20 用HslCommunication 连接欧姆龙 plc有时会连接失败。报异常为“未知错误”
- ¥15 网络设备配置与管理这个该怎么弄
- ¥20 机器学习能否像多层线性模型一样处理嵌套数据
- ¥20 西门子S7-Graph,S7-300,梯形图
- ¥50 用易语言http 访问不了网页
- ¥50 safari浏览器fetch提交数据后数据丢失问题
- ¥15 matlab不知道怎么改,求解答!!