# Source: https://blog.csdn.net/weixin_39695028/article/details/80299078
import requests
from lxml import html
import sys
from you_get import common as you_get
import urllib.request
import urllib.request as urllib2
import re
from urllib import parse
import os
import sys
from you_get import common as you_get #导入you-get库
import urllib
from urllib import request
import requests
from bs4 import BeautifulSoup
import json
from bs4 import BeautifulSoup
from six.moves import urllib
# Login credentials posted to the site's member login form.
# WARNING: plaintext credentials committed in source — rotate/externalize them.
postData={
'userid': 'llabc199262',
'pwd': 'llabc199262'
}
# A Session keeps cookies between requests, so the login cookie set by the
# POST below is reused by later requests made through this session.
session_requests = requests.session()
login_url = "http://club.dearedu.com/member/index.php"
# Initial GET primes the session with any pre-login cookies (e.g. PHPSESSID).
result = session_requests.get(login_url)
# Submit the login form; the Referer header mimics a browser coming from the
# login page. `result` is rebound — only the session's cookies matter here.
result = session_requests.post(
login_url,
data = postData,
headers = dict(referer=login_url)
)
import urllib.request

# Fetch the member index page using a hard-coded logged-in cookie header and
# save the HTML to disk re-encoded as UTF-8 (the site serves GBK pages).
url1 = "http://club.dearedu.com/member/index.php"
# `headers` (User-Agent + session cookie) is reused by every request below.
headers = {'User-Agent': "Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
'cookie': "__jsluid_h=8e388e30d16bb1e10f73865f9a77e413; UM_distinctid=16bbf9691e24c3-0a6e0f5e007e47-3c604504-1fa400-16bbf9691e33c2; guid=42770979.3940818425674125000.1562312902694.094; Hm_lvt_482d1a083f7aef77357d5a920988d5ac=1562287444,1562334791,1563864719; PHPSESSID=gu3663kt6ao1gh67nbh6d9cqg0; DedeUserID=9058331; DedeUserIDckMd5=63c93468fbe47b82; DedeLoginTime=1563865398; DedeLoginTime__ckMd5=53ce53c6df5208fd; UCenter_username=llabc199262; UCenter_uid=9056889; Example_auth=8b2eRflplt%2FzXmqHdAmQ4QwVNJ14wO1lJNnWLUr9EsZnXAxyIMu%2Fcdi8wB%2B5Zdf%2B; monitor_count=2; Hm_lpvt_482d1a083f7aef77357d5a920988d5ac=1563865400"}
req = urllib.request.Request(url=url1, headers=headers)
# Context manager closes the HTTP response even on error — the original
# leaked the connection by never closing `response`.
with urllib.request.urlopen(req) as response:
    page = response.read().decode('gbk')
with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.html", 'w', encoding='utf-8') as f:
    f.write(page)
# Candidate download endpoints: a site resource link and a direct .rar URL.
url2 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=5408682&price=0"
url3 = "https://dl.glzy8.com/upfiles/soft/2019/0527/jijianhuisefenggongzuojihuazongjiehuibaopptmb.rar"
req1 = urllib.request.Request(url=url3, headers=headers)
# The payload is a binary .rar archive, so it must be written in binary mode
# ('wb'); the original opened the file in text mode and, worse, left the
# `with` body empty (only comments), which is a SyntaxError.
with urllib.request.urlopen(req1) as response1:
    with open(r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网\a.html", 'wb') as f:
        f.write(response1.read())
# (Original author's help request:) http://club.dearedu.com/member/index.php is an
# education site; after logging in via cookies I obtained the download link,
# but the file itself fails to download.
# Brute-force download: walk resource ids upward from a known-valid one and
# hand each "free" download URL to you-get. The huge upper bound effectively
# means "run until stopped".
toPath = r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网"
try:
    for k in range(5408682, 1000000000000):
        url1 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=" + str(k) + "&price=0"
        print(url1)
        # you-get is driven through its CLI entry point by faking sys.argv.
        sys.argv = ['you-get', '-o', toPath, url1]
        you_get.main()
except Exception as e:
    # Original used a bare `except: pass`, which also swallowed
    # KeyboardInterrupt and made the loop impossible to stop with Ctrl-C.
    # Catch only real errors and report instead of failing silently.
    print('download loop stopped:', e)
def bCrawler(url, toPath=None):
    """Scrape one listing page and download every free resource it links.

    Extracts each ``zid`` from the page's download buttons, builds the
    corresponding free-download URL, and appends each response body to
    ``a.doc`` inside *toPath*.

    Parameters:
        url: listing-page URL, e.g. ``http://s.dearedu.com/?page=N&rows=10&attr_free=1``.
        toPath: destination directory; defaults to the hard-coded directory
            used elsewhere in this script (backward compatible with the
            original one-argument signature).
    """
    if toPath is None:
        toPath = r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网"
    req = urllib.request.Request(url=url, headers=headers)
    # Close the HTTP response deterministically (original leaked it).
    with urllib.request.urlopen(req) as resp:
        # 'ignore' drops undecodable bytes. The original stripped u'\0xa3' —
        # a broken escape meaning NUL,'x','a','3' that never matches; the
        # intended character was u'\xa3' (£).
        HtmlStr = resp.read().decode('utf-8', 'ignore').replace(u'\xa3', u'')
    # Each free resource's download button carries its zid in this pattern.
    pat = r'zid=(.*?)&price=0" class="layui-btn">'
    re_img = re.compile(pat, re.S)
    bList = re_img.findall(HtmlStr)
    print(bList)
    print(len(bList))
    for bUrl in bList:
        url5 = "http://club.dearedu.com/member/down_gb_iweike.php?zid=" + bUrl + "&price=0"
        print(url5)
        f1 = requests.get(url5, headers=headers)
        # `f1.content` is bytes (doc/rar payload): must append in BINARY mode.
        # The original's text-mode "a+" raised TypeError on the first write.
        with open(os.path.join(toPath, "a.doc"), "ab") as f:
            f.write(f1.content)
# NOTE: URLs containing Chinese characters must be percent-decoded first.
url = r"http://s.dearedu.com/?page=2&rows=10&attr_free=1"
toPath = r"D:\SW file\pycharm file\400集\爬虫\教育文档\第二教育网"  # directory must already exist
# BUG FIX: the original called bCrawler(url, toPath), but bCrawler accepts a
# single positional argument — that call raised TypeError before any crawling.
bCrawler(url)
# Walk the remaining listing pages, crawling each one.
for i in range(556, 12204):
    url = "http://s.dearedu.com/?page=" + str(i) + "&rows=10&attr_free=1"
    print(url)
    print(i)
    bCrawler(url)