从象牙塔到雷峰塔 2019-07-23 16:12 采纳率: 0%
浏览 2108
已结题

爬取需要登录的网站,登录之后跳转到下载页面,获取下载链接并下载,现在的问题是登录成功了但是无法下载文件

https://blog.csdn.net/weixin_39695028/article/details/80299078

import requests
from lxml import html
import sys
from you_get import common as you_get
import urllib.request

import urllib.request as urllib2

import re
from urllib import parse
import os
import sys
from you_get import common as you_get #导入you-get库
import urllib
from urllib import request
import requests
from bs4 import BeautifulSoup
import json
from bs4 import BeautifulSoup

from six.moves import urllib

# Credentials for the club.dearedu.com member area.
# NOTE(review): plain-text credentials in source — move to config/env.
postData = {
    'userid': 'llabc199262',
    'pwd': 'llabc199262',
}

# A single Session so login cookies persist across later requests.
session_requests = requests.Session()

login_url = "http://club.dearedu.com/member/index.php"

# Prime the session with any pre-login cookies, then submit the
# credentials to the same endpoint with a matching Referer header.
result = session_requests.get(login_url)
result = session_requests.post(
    login_url,
    data=postData,
    headers={'referer': login_url},
)

import urllib.request

# Member index page, fetched with a logged-in browser's cookie header so
# the server treats the request as authenticated.
url1 = "http://club.dearedu.com/member/index.php"
headers = {'User-Agent': "Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
'cookie': "__jsluid_h=8e388e30d16bb1e10f73865f9a77e413; UM_distinctid=16bbf9691e24c3-0a6e0f5e007e47-3c604504-1fa400-16bbf9691e33c2; guid=42770979.3940818425674125000.1562312902694.094; Hm_lvt_482d1a083f7aef77357d5a920988d5ac=1562287444,1562334791,1563864719; PHPSESSID=gu3663kt6ao1gh67nbh6d9cqg0; DedeUserID=9058331; DedeUserIDckMd5=63c93468fbe47b82; DedeLoginTime=1563865398; DedeLoginTime__ckMd5=53ce53c6df5208fd; UCenter_username=llabc199262; UCenter_uid=9056889; Example_auth=8b2eRflplt%2FzXmqHdAmQ4QwVNJ14wO1lJNnWLUr9EsZnXAxyIMu%2Fcdi8wB%2B5Zdf%2B; monitor_count=2; Hm_lpvt_482d1a083f7aef77357d5a920988d5ac=1563865400"}
req = urllib.request.Request(url=url1, headers=headers)
response = urllib.request.urlopen(req)
# Save the page for inspection.  BUG FIX: the write call must sit inside
# the `with` block (it was un-indented in the original).  Decode
# tolerantly — the page claims GBK but may contain stray bytes.
with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.html", 'w', encoding='utf-8') as f:
    f.write(response.read().decode('gbk', 'ignore'))

# Candidate download links: url2 is the site's member download endpoint,
# url3 is a direct .rar link used to exercise the download path.
url2 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=5408682&price=0"

url3 = "https://dl.glzy8.com/upfiles/soft/2019/0527/jijianhuisefenggongzuojihuazongjiehuibaopptmb.rar"

req1 = urllib.request.Request(url=url3, headers=headers)

response1 = urllib.request.urlopen(req1)

# BUG FIX: the original `with` body contained only comments (a
# SyntaxError) and tried to decode a binary archive as GBK text.  A .rar
# is binary: write the raw bytes in 'wb' mode with a matching extension.
with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.rar", 'wb') as f:
    f.write(response1.read())

麻烦大神帮忙看看,http://club.dearedu.com/member/index.php是一个教育网站,我用cookie登录网站之后获取了下载链接,但是下载不了文件

# Destination directory for you-get downloads (must already exist).
toPath = r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网"

# Walk candidate resource ids and let you-get fetch each one.
# BUG FIX: the original wrapped the WHOLE loop in a bare `except: pass`,
# so the first failure silently aborted the entire crawl (and swallowed
# Ctrl-C).  Catch per iteration, report, and keep going.
for k in range(5408682, 1000000000000):  # NOTE(review): effectively unbounded — confirm intended upper id
    url1 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=" + str(k) + "&price=0"
    print(url1)
    # you-get reads its arguments from sys.argv.
    sys.argv = ['you-get', '-o', toPath, url1]
    try:
        you_get.main()
    except Exception as e:
        print("download failed for", url1, "-", e)

def bCrawler(url, toPath=r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网"):
    """Scrape one listing page for free resource ids and download each.

    Parameters:
        url: listing-page URL to scrape (e.g. s.dearedu.com search page).
        toPath: directory the downloaded content is appended into.  Kept
            optional (defaulting to the original hard-coded directory) so
            existing one-argument callers still work.
    """
    req = urllib.request.Request(url=url, headers=headers)  # headers is a dict
    response1 = urllib.request.urlopen(req)
    # BUG FIX: u'\0xa3' is a NUL byte followed by "xa3", which never
    # occurs in decoded text; the intended character is '\xa3'.
    HtmlStr = response1.read().decode('utf-8', 'ignore').replace('\xa3', '')
    # Each free item links to down_gb_iweike.php?zid=NNN&price=0.
    pat = r'zid=(.*?)&price=0" class="layui-btn">'
    re_img = re.compile(pat, re.S)
    bList = re_img.findall(HtmlStr)
    print(bList)
    print(len(bList))
    for bUrl in bList:
        url5 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=" + bUrl + "&price=0"
        print(url5)
        f1 = requests.get(url5, headers=headers)
        # BUG FIX: f1.content is bytes — the file must be opened in
        # binary append mode ("ab"); text mode ("a+") raises TypeError.
        with open(os.path.join(toPath, "a.doc"), "ab") as f:
            f.write(f1.content)

    # urllib.request.urlretrieve(url5,r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\sina.docx",data=headers)

# except:
#     pass


# with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.docx","w") as f:    #encoding="utf-8"
#     f.write(HtmlStr)

#url中如果有中文字符必须进行解码
# Listing URL uses plain ASCII query parameters, so no quoting is needed.
url = r"http://s.dearedu.com/?page=2&rows=10&attr_free=1"

# Output directory (must already exist).
toPath = r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网"

# BUG FIX: bCrawler takes the URL only — the original two-argument call
# bCrawler(url, toPath) raised TypeError.
bCrawler(url)

# Crawl the remaining listing pages.
for i in range(556, 12204):
    url = "http://s.dearedu.com/?page=" + str(i) + "&rows=10&attr_free=1"
    print(url)
    print(i)
    bCrawler(url)

此代码目前存在的问题:登录成功但是不能下载

  • 写回答

3条回答 默认 最新

  • oyljerry 2019-07-23 17:05
    关注

    你下载的request中有没有带上对应的cookie等信息,不然服务器还是可能认为你没有认证

    cookie不能直接用字符串,解析处理一下

    from http.cookies import SimpleCookie
    
    rawdata = 'Cookie: devicePixelRatio=1; ident=exists; __utma=13103r6942.2918; __utmc=13103656942; __utmz=13105942.1.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); mp_3cb27825a6612988r46d00tinct_id%22%3A%201752338%2C%22%24initial_referrer%22%3A%20%22https%3A%2F%2Fwww.pion_created_at%22%3A%20%222015-08-03%22%2C%22platform%22%3A%20%22web%22%2C%%22%3A%20%%22%7D; t_session=BAh7DUkiD3Nlc3NpbWVfZV9uYW1lBjsARkkiH1BhY2lmaWMgVGltZSAoVVMgJiBDYW5hZGEpBjsAVEkiFXNpZ25pbl9wZXJzb25faWQGOwBGaQMSvRpJIhRsYXN0X2xvZ2luX2RhdGUGOwBGVTogQWN0aXZlU3VwcG9ydDo6VGltZVdpdGhab25lWwhJdToJVGltZQ2T3RzAAABA7QY6CXpvbmVJIghVVEMGOwBUSSIfUGFjaWZpZWRfZGFzaGJvYXJkX21lc3NhZ2UGOwBGVA%3D%3D--6ce6ef4bd6bc1a469164b6740e7571c754b31cca'
    cookie = SimpleCookie()
    cookie.load(rawdata)
    
    # Even though SimpleCookie is dictionary-like, it internally uses a Morsel object
    # which is incompatible with requests. Manually construct a dictionary instead.
    cookies = {}
    for key, morsel in cookie.items():
        cookies[key] = morsel.value
    
    评论

报告相同问题?

悬赏问题

  • ¥15 C#算法问题, 不知道怎么处理这个数据的转换
  • ¥15 YoloV5 第三方库的版本对照问题
  • ¥15 请完成下列相关问题!
  • ¥15 drone 推送镜像时候 purge: true 推送完毕后没有删除对应的镜像,手动拷贝到服务器执行结果正确在样才能让指令自动执行成功删除对应镜像,如何解决?
  • ¥15 求daily translation(DT)偏差订正方法的代码
  • ¥15 js调用html页面需要隐藏某个按钮
  • ¥15 ads仿真结果在圆图上是怎么读数的
  • ¥20 Cotex M3的调试和程序执行方式是什么样的?
  • ¥20 java项目连接sqlserver时报ssl相关错误
  • ¥15 一道python难题3