python爬取需要登录的网站并获取下载链接进行下载

import requests
from lxml import html
import sys
from you_get import common as you_get
import urllib.request

import urllib.request as urllib2

import re
from urllib import parse
import os
import sys
from you_get import common as you_get #导入you-get库
import urllib
from urllib import request
import requests
from bs4 import BeautifulSoup
import json
from bs4 import BeautifulSoup

from six.moves import urllib

#从登录页面携带cookie进行登录
# Log in by replaying a saved session cookie: request the member index page
# with the cookie attached, then save the returned HTML locally so the login
# state can be verified by inspecting the file.
import urllib.request

url1 = "http://club.dearedu.com/member/index.php"
# User-Agent + session cookie captured from a logged-in browser session.
# NOTE(review): the cookie expires server-side; refresh it when requests stop
# returning the logged-in page.
headers = {'User-Agent': "Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
'cookie': "__jsluid_h=8e388e30d16bb1e10f73865f9a77e413; UM_distinctid=16bbf9691e24c3-0a6e0f5e007e47-3c604504-1fa400-16bbf9691e33c2; guid=42770979.3940818425674125000.1562312902694.094; Hm_lvt_482d1a083f7aef77357d5a920988d5ac=1562287444,1562334791,1563864719; PHPSESSID=gu3663kt6ao1gh67nbh6d9cqg0; DedeUserID=9058331; DedeUserIDckMd5=63c93468fbe47b82; DedeLoginTime=1563865398; DedeLoginTime__ckMd5=53ce53c6df5208fd; UCenter_username=llabc199262; UCenter_uid=9056889; Example_auth=8b2eRflplt%2FzXmqHdAmQ4QwVNJ14wO1lJNnWLUr9EsZnXAxyIMu%2Fcdi8wB%2B5Zdf%2B; monitor_count=2; Hm_lpvt_482d1a083f7aef77357d5a920988d5ac=1563865400"}
req = urllib.request.Request(url=url1, headers=headers)
response = urllib.request.urlopen(req)
# The site serves GBK-encoded HTML; decode it and re-save as UTF-8.
# Fix: the write call must be indented inside the `with` block (the original
# paste had it at column 0, which is a SyntaxError).
with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.html", 'w', encoding='utf-8') as f:
    f.write(response.read().decode('gbk'))

#登录成功后进入相关页面,爬取页面中的下载地址并进行下载
#登录成功后进入相关页面,爬取页面中的下载地址并进行下载
def bCrawler(url):
    """Fetch one listing page, extract every resource id, and download each
    resource, appending the raw bytes to a local file.

    Relies on the module-level ``headers`` dict (carries the login cookie).

    :param url: listing-page URL to scrape for download links
    """
    req = urllib.request.Request(url=url, headers=headers)  # 拿出的是字典
    response1 = urllib.request.urlopen(req)
    # Decode leniently; strip the pound-sign character. Fix: the original
    # wrote u'\0xa3' (a NUL byte + literal "xa3"), which never matches —
    # the intended escape is u'\xa3'.
    HtmlStr = response1.read().decode('utf-8', 'ignore').replace(u'\xa3', u'')
    # Capture the zid value from each free-download button link.
    pat = r'zid=(.*?)&price=0" class="layui-btn">'
    re_img = re.compile(pat, re.S)
    bList = re_img.findall(HtmlStr)
    print(bList)
    print(len(bList))

    for bUrl in bList:
        url5 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=" + bUrl + "&price=0"
        print(url5)
        f1 = requests.get(url5, headers=headers)
        # Fix: the download body is binary, so the file must be opened in
        # binary append mode ("ab"); the original text mode "a+" raises
        # TypeError when given bytes — this is why downloads failed.
        # NOTE(review): every download is appended to one .doc file, so the
        # result is only a valid document for the first download — consider
        # writing each resource to its own file.
        with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.doc", "ab") as f:
            f.write(f1.content)

# Walk every free-resource listing page and download its documents.
# Fix: the loop body must be indented (the original paste had it at column 0,
# a SyntaxError), and the initial `url` assignment was dead code — it was
# overwritten on the first iteration, so it is dropped.
for i in range(556, 12204):
    url = "http://s.dearedu.com/?page=" + str(i) + "&rows=10&attr_free=1"
    print(url)
    print(i)  # progress indicator: current page number
    bCrawler(url)

此代码目前存在的问题:登陆成功,下载链接也爬取了,但是下载不了,下载的时候用的f1 = requests.get(url5,headers=headers),也已经带上cookie了,不知道为什么不行,请各位帮忙想想办法,悬赏好说

1个回答

caokaishui
HS_ldy b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>\x00\x03\x00\xfe\xff\t\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\xda\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\xdc\x00\x00\x00\x01\x00\x00\x00\xfe\xff\xff\xff\x00\x00\x00\x00\xd8\x00\x00\x00\xd9\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff这个是二进制的吗
大约一年之前 回复
caokaishui
HS_ldy 你好,看了你写的连接后,文件下载下来了,但是打不开,用f2 = urllib.request.urlopen(url=req2).read().decode('gbk')和w写入时就直接文件内容有问题打不开,用f2 = urllib.request.urlopen(url=req2).read()然后用wb写入时候说是文件后缀和文件格式不匹配
大约一年之前 回复
Csdn user default icon
上传中...
上传图片
插入图片
抄袭、复制答案,以达到刷声望分或其他目的的行为,在CSDN问答是严格禁止的,一经发现立刻封号。是时候展现真正的技术了!
立即提问