python代码:
from downloader import Downloader #, cStringIO, cPickle
from threading import Thread
from time import sleep
import log0 as log
from os.path import basename
import requests as req
import pickle
from os.path import exists
db='E:/tmp/download.data'
def append(obj):
try:
if exists(db):
with open(db,'rb') as f:
data=pickle.load(f)
else: data={}
except:
data={}
data[obj['url']]=obj
with open(db,'wb') as f:
pickle.dump(data,f)
def load(url):
if not exists(db): return None
try:
with open(db,'rb') as f:
data=pickle.load(f)
return data.get(url)
except:
return None
def out(msg):
print(msg)
import time
from os.path import basename, exists, getsize
from queue import Queue
from threading import Lock, Thread, current_thread
import requests as req
import random as rand
import conf
class Downloader:
KB=1024
MB=KB*KB
GB=KB*MB
range_size=MB
max_workers=10
spd_refresh_interval=1
user_agents=[
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36'
'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
]
chunk_size=KB
max_error=0.1 #单线程允许最大出错率
max_error_one_worker=0.5 #仅剩一个线程时允许的最大出错率
home='E:/tmp/' #下载目录
def __init__(self,c):
self.__locks={i:Lock() for i in ('file','worker_info','itr_job','download_info')}
self.__config=c
self.__alive=False
self.__fails=Queue()
self.__conf=c
c=conf.load(c['url'])
if c:
self.__conf=c
self.__init_from_conf()
else: self.__init_task()
def __init_from_conf(self):
self.__download_offset=self.__conf['offset']
for i in self.__conf['fails']: self.__fails.put(i)
def __get_agent(self):
return self.user_agents[rand.randint(0,len(self.user_agents)-1)]
def __init_task(self):
headers={'Range':'bytes=0-0'}
headers['User-Agent']=self.__get_agent()
print(headers)
try:
r=req.get(self.__conf['url'],headers=headers,stream=True)
self.__conf['name'] = basename(self.__conf['url']) or str(int(round(time.time()*1000)))
self.__conf['206'] = r.status_code == 206 or r.headers.get('Accept-Ranges')=='bytes'
if self.__conf['206']:
self.__conf['len']=int(r.headers['Content-Range'].split('/')[-1])
elif r.status_code!=200:
log.out('init task err')
return
else:
self.__conf['len']=int(r.headers['Content-Length'])
r.close()
self.__download_offset=0
self.__conf['init']=True
except Exception as e:
log.out(e)
def __itr_job(self):
if self.__locks['itr_job'].acquire():
if not self.__fails.empty():
ans=self.__fails.get()
elif self.__download_offset<self.__conf['len']:
o=self.__download_offset
ans=(o,min(self.__conf['len']-1,o+self.range_size-1))
self.__download_offset+=self.range_size
else:
ans=(-1,-1)
self.__locks['itr_job'].release()
return ans
def __has_job(self):
if self.__locks['itr_job'].acquire():
ans=self.__download_offset<self.__conf['len'] or not self.__fails.empty()
self.__locks['itr_job'].release()
return ans
def __download_no_206(self):
headers={'User-Agent':self.__get_agent()}
r=req.get(self.__conf['url'],headers=headers,stream=True)
self.__download_offset=0
if r.status_code != 200:
r.close()
self.__stopped()
return
try:
for con in r.iter_content(chunk_size=self.chunk_size):
if self.__kill_signal: break
self.__file.write(con)
l=len(con)
self.__down_bytes+=l
self.__download_offset+=l
t0=time.time()
t=t0-self.__last_time
if t>=self.spd_refresh_interval:
self.__down_spd=self.__down_bytes/t
log.out('downspd: %d KB/s'%(self.__down_spd/self.KB))
self.__last_time=t0
self.__down_bytes=0
except:
pass
r.close()
self.__stopped()
def __download_206(self):
file_len=self.__conf['len']
total=0
error=0
kill=False
with req.session() as sess:
while True:
s,e=self.__itr_job()
if s==-1:
log.out('no job stop')
break
headers={'Range':'bytes=%d-%d'%(s,e)}
headers['User-Agent']=self.__get_agent()
try:
r=sess.get(self.__conf['url'],headers=headers,stream=True)
total+=1
if r.status_code!=206:
self.__fails.put((s,e))
error+=1
if error>self.max_error*total:
if self.__locks['worker_info'].acquire():
num=self.__current_workers
self.__locks['worker_info'].release()
if error>self.max_error_one_worker*total or num>1:
break
continue
for con in r.iter_content(chunk_size=self.chunk_size):
if self.__locks['worker_info'].acquire():
if self.__kill_signal:
self.__locks['worker_info'].release()
kill=True
break
self.__locks['worker_info'].release()
if self.__locks['file'].acquire():
self.__file.seek(s)
self.__file.write(con)
l=len(con)
s+=l
self.__locks['file'].release()
if self.__locks['download_info'].acquire():
self.__down_bytes+=l
t0=time.time()
t=t0-self.__last_time
if t>=self.spd_refresh_interval:
log.out('downspd: %d KB/s'%(self.__down_spd/self.KB))
self.__down_spd=self.__down_bytes/t
self.__down_bytes=0
self.__last_time=t0
self.__locks['download_info'].release()
if s<=e and s<file_len:
self.__fails.put((s,e))
if kill:
break
except :
self.__fails.put((s,e))
error+=1
if error>self.max_error*total:
if self.__locks['worker_info'].acquire():
num=self.__current_workers
self.__locks['worker_info'].release()
if error>self.max_error_one_worker*total or num>1:
break
self.__stopped()
def __start_worker(self,target):
if self.__locks['worker_info'].acquire():
if self.__kill_signal:
self.__locks['worker_info'].release()
return False
if self.__current_workers<self.max_workers:
Thread(target=target).start()
self.__current_workers+=1
log.out('new worker started,current workers %d'%self.__current_workers)
self.__locks['worker_info'].release()
return True
def __start_workers(self):
for _ in range(self.max_workers):
if not self.__start_worker(self.__download_206): break
time.sleep(0.8)
def start(self):
if self.__alive:
log.out('already started!')
return
if self.__conf.get('status')=='done':
log.out('already done')
return
self.__alive=True
self.__kill_signal=False
self.__conf['status']='working'
self.__down_bytes=0
self.__down_spd=0
self.__last_time=0
self.__current_workers=0
self.__start_time=time.time()
try:
path=self.home+self.__conf['name']
self.__file=open(path,(exists(path) and 'rb+') or 'wb' )
if not self.__conf['206']:
Thread(target=self.__start_workers).start()
else: self.__start_worker(self.__download_no_206)
log.out('starting done!')
except: log.out('starting failed')
def stop(self):
if self.__kill_signal:
return
log.out('stopping')
if self.__locks['worker_info'].acquire():
self.__kill_signal=True
if self.__conf['status']=='working':
self.__conf['status']='stopped'
self.__locks['worker_info'].release()
def __after_stopped(self):
if not self.__kill_signal:
self.__kill_signal=True
__alive=False
self.__file.close()
log.out('total time: %.2f'%(time.time()-self.__start_time))
self.__conf['offset']=self.__download_offset
if not self.__has_job():
self.__conf['status']='done'
elif self.__conf.get('status')!='stopped': self.__conf['status']='error'
leak=0
ls=[]
while not self.__fails.empty():
i=self.__fails.get()
leak+=i[1]-i[0]+1
ls.append(i)
self.__conf['fails']=ls
leak+=max(self.__conf['len']-self.__download_offset,0)
log.out('total leak: %d'%leak)
conf.append(self.__conf)
def __stopped(self):
if self.__locks['worker_info'].acquire():
self.__current_workers-=1
log.out('%s stopped'%current_thread().name)
if self.__current_workers==0:
self.__after_stopped()
self.__locks['worker_info'].release()
#!/usr/bin/env python
# coding=utf-8
#import importlib,sys
#import sys
#sys.setdefaultencoding('gbk')
'''import sys
import imp
import sys
reload(sys)
sys.setdefaultencoding('utf8')
'''
'''
import sys
sys.setdefaultencoding('utf-8')
import jieba
import json'''
def main():
from bs4 import BeautifulSoup
import urllib.request
import urllib.parse as parse
import ssl
import re
import os,os.path
import codecs
import requests
def getHtml(url):
global html
page = urllib.request.urlopen(url)
html = page.read()
return html
def file(url1,file_name,name):
print(url1)
#file(name,save_path,filename)
#url1= +'/' + filename
url1=url1.encode()
#file = open(name ,'wb+')
#file.write(url1 )
#file.close()
#print(file_name)
headers = {'Host': 'https://files.pythonhosted.org/packages/','User-Agent':'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER','Referer': 'https://pypi.org/',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'zh-CN,zh;q=0.8'}
#req = urllib.urlretrieve(download_url,headers=headers)
#urllib.request.urlopen('https://www.lfd.uci.edu/~gohlke/pythonlibs/')
#req = urllib.request.Request(url=url,headers=header)
#request = urllib.request.urlopen(url1)
#response = urllib.request.urlopen(request)
import socket
import urllib.request
#设置超时时间为30s
socket.setdefaulttimeout(5)
#解决下载不完全问题且避免陷入死循环
'''try:
urllib.request.urlretrieve(url1.decode(),name)
except socket.timeout:'''
count = 1
while count <= 1:
import time
# 格式化成2016-03-20 11:45:39形式
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# 格式化成Sat Mar 28 22:24:24 2016形式
print(time.strftime("%a %b %d %H:%M:%S %Y", time.localtime()))
# 将格式字符串转换为时间戳
a = "Sat Mar 28 22:24:24 2016"
print(time.mktime(time.strptime(a,"%a %b %d %H:%M:%S %Y")))
try:
urllib.request.urlretrieve(url1.decode(),name)
print('\nchangshi'+str(count)+'over\n')
break
except socket.timeout:
err_info = 'Reloading for %d time'%count if count == 1 else 'Reloading for %d times'%count
print(err_info)
count += 1
except urllib.error.HTTPError:
print('urllib.error.HTTPError')
except urllib.error.URLError:
print('urllib.error.URLError')
except ssl.SSLWantReadError:
print('ssl.SSLWantReadError')
if count > 1:
print("downloading picture fialed!")
#urllib.request.urlretrieve(url1.decode(),name)
global i
i += 1
print(url1.decode())
#file = open(name ,'wt+')
#file.write(str(req.content()))
#file.close()
print(file_name)
global x
print("Completed : .... %d ..." % x)
'''for i in range(len(name_list)):
j=0
if name_list[i-24:i+1]=='https://pypi.org/project/':
name_list1.append(name_list[i+1:i+60])'''
print('\n........'+name+'..........complete\n')
'''headers = {'Host': 'download.lfd.uci.edu','User-Agent':'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER','Referer': 'https://www.lfd.uci.edu/~gohlke/pythonlibs/',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'zh-CN,zh;q=0.8'}
#req = urllib.urlretrieve(download_url,headers=headers)
#urllib.request.urlopen('https://www.lfd.uci.edu/~gohlke/pythonlibs/')
#req = urllib.request.Request(url=url,headers=header)
request = requests.get(url=url1,headers=headers)
#response = urllib.request.urlopen(request)
global i
i += 1
file = open(name ,'wb+')
file.write(request.content)
file.close()
print(file_name)
print("Completed : .... %d ..." % x)'''
save_path = os.getcwd()
url = 'https://www.lfd.uci.edu/'
html = getHtml(url)
html='''
</li>
<li><a id="imagecodecs-lite"></a><strong><a href="https://www.lfd.uci.edu/~gohlke/#python">Imagecodecs-lite</a></strong> (deprecated): a subset of <a href="https://www.lfd.uci.edu/~gohlke/pythonlibs/#imagecodecs">imagecodecs</a>.
<ul>
<li><a href="javascript:;" onclick=" javascript:dl([101,99,106,112,118,103,115,49,47,119,116,45,104,111,95,51,48,108,105,50,53,101,113,109,97,46,110,121,100], "5B1E23C97AFG4D0<KD05=@A9D:B?B?H6H>6:2J>:I<ID:GIJH8;@"); "javascript: dl("" title="[1 KB] [Feb 17, 2020]">imagecodecs_lite‑2020.1.31‑py3‑none‑any.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,97,51,110,111,49,45,116,106,101,99,113,105,119,50,108,95,115,48,52,100,118,56,53,47,54,112,103,104,46,57,109], "@=7:IDF6G;N0J893C89@?>;685=A4ML4=L159I1E59I1E5<;2?0NCHBL<K>"); "javascript: dl("" title="[148 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp38‑cp38‑win_amd64.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,49,47,119,100,46,48,110,99,115,50,104,45,57,51,111,108,113,97,56,112,95,106,109,103,116,105,101,53,118], "89E@CLKH1IFAGJ7>3J78D?IHJ;950<4094=;7C=B;7C=B;2I6=942:?"); "javascript: dl("" title="[120 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp38‑cp38‑win32.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,99,112,108,105,119,50,109,103,113,110,45,57,55,48,49,115,118,47,100,111,53,51,95,97,104,101,116,54,46,106,52], "?5M81@DJA36G7I0CBI0?F23JI:5=>;L>5LE:01E<:01E<6:439FG6BKNL4H2"); "javascript: dl("" title="[145 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp37‑cp37m‑win_amd64.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,57,47,55,112,101,49,105,106,115,95,99,51,116,50,110,113,53,45,48,108,97,46,118,119,109,111,104,100,103], "8=7?3F@<16HDL4:IK4:89C6<4A=B50E5=E;A:3;2A:3;2HAG6>;=EGJC"); "javascript: dl("" title="[118 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp37‑cp37m‑win32.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,115,119,50,54,47,111,112,101,48,105,103,110,100,53,109,99,45,46,97,106,51,52,118,104,95,49,116,108,113,57], "02CL6F=J49>B:7?5<7?0HK9J7@28IMAI2AD@?6D3@?6D3>@19;HB><3EA1GK"); "javascript: dl("" title="[137 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp36‑cp36m‑win_amd64.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,50,110,116,51,97,111,104,45,95,57,103,101,99,119,115,105,118,54,108,113,112,100,109,106,49,47,46,53,48], ">0GCD@K2I?F4:;<5E;<>8B?2;70LH9JH0J37<D3A7<D3AF7=?130J=6B"); "javascript: dl("" title="[112 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp36‑cp36m‑win32.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,53,49,51,113,100,108,47,52,118,54,46,106,105,109,99,57,112,103,97,101,110,115,48,95,104,119,50,45,116,111], "EJ;3@80L6>@206<=BAC>M4C>EG5<LCKJF1?:1J:2K>@20K>@20=KI<DGB=497:IH5"); "javascript: dl("" title="[133 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp35‑cp35m‑win_amd64.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,46,106,118,49,101,51,109,119,108,47,104,116,53,99,113,50,105,45,100,57,111,112,48,115,95,110,97,103], "G?1>E2<;9=E5<9@6JK4=DB4=GH8@;4A?F3C03?05A=E5<A=E5<6A7@I5?07:8"); "javascript: dl("" title="[110 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp35‑cp35m‑win32.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,54,118,48,53,115,45,112,116,110,106,51,46,100,108,99,52,109,55,50,105,47,49,104,97,119,113,103,111,101,95,57], "4B9I6137D>6BADC@GJL>K<L>4M=C7L5B2EN;EB;:5>6BA5>6BA@5HC8MG@<0?;HF="); "javascript: dl("" title="[145 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp27‑cp27m‑win_amd64.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,115,51,118,57,104,112,50,110,48,103,109,55,95,49,100,108,116,101,106,45,113,53,46,47,111,99,119,97,105], "06BD52E@GI56;GL:K9AIH>AI0<?L@AC68=3F=6F1CI56;CI56;:CJL716FJ4?"); "javascript: dl("" title="[120 KB] [Dec 04, 2019]">imagecodecs_lite‑2019.12.3‑cp27‑cp27m‑win32.whl</a></li>
<li><a href="javascript:;" onclick=" javascript:dl([101,112,116,57,114,103,106,100,122,97,101,115,46,51,111,47,48,105,99,49,108,113,53,50,109,45,118], ":F5D0IE1>@G849A=69A:HC@19HF?B2;BF;<;183;47"); "javascript: dl("" title="[1.1 MB] [Dec 04, 2019]">imagecodecs‑lite‑2019.12.3.tar.gz</a></li>
'''
print('html done')
#html.decode('utf-8')
#print(html)
'''headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1)AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'}
r = requests.get(url, headers = headers)
r.encoding = "utf-8"
soup = BeautifulSoup(r.text, "html.parser")
#html_mod=re.sub(pattern=".",repl=".",string=html.decode('utf-8'))
for link in soup.find_all('a'): #soup.find_all返回的为列表
print(link.get('href'))
#name_list+=link
'''
name_list = html#soup.find_all('a')#re.findall(r']">*-cp38-win_amd64.whl',html.decode('utf-8'))
x=1
files=os.listdir(save_path)
print(files)
print(type(name_list))
name_list=str(name_list)
name_list1=[]
#print(name_list)
#for name in name_list:
k=0
# name[k]=str(name1[k])
for i in range(len(name_list)):
j=0
if name_list[i-2:i+1]==']">':
name_list1.append(name_list[i+1:i+60])
global m
if k<len(name_list1):
for l in range(len(name_list1[k])):
if l-9>=0:
if name_list1[k][l-4:l]=='.whl' or name_list1[k][l-3:l]=='.gz' or name_list1[k][l-4:l]=='.zip':
j=1
m=l
if j==1:
name_list1[k]=name_list1[k][0:m]
k+=1
'''if j==0:
name_list.remove(name)'''
#file_name = os.path.join(save_path ,name)
i=0
#print(name)
print(name_list1)
for name in name_list1:
j=0
for l in range(len(name)):
if l-9>=0:
if name[l-4:l]=='.whl' or name[l-3:l]=='.gz' or name[l-4:l]=='.zip':
j=1
m=l
if j==1:
name=name[0:m]
k+=1
if name in files:
continue
'''if name=='Delny‑0.4.1‑cp27‑none‑win_amd64.whl</a></li>\n<li>' or name==Delny‑0.4.1‑cp27‑none‑win32.whl</a></li>
</ul>
</:
continue
'''
print('no:'+str(x))
print('\ndownload '+name)
# importlib.reload(sys)
#imp.reload(sys)
for l in range(len(name)):
if l-9>=0:
if name[l-4:l]=='.whl' or name[l-3:l]=='.gz' or name[l-4:l]=='.zip':
j=1
m=l
if j==1:
name=name[0:m]
k+=1
string='https://download.lfd.uci.edu/pythonlibs/s2jqpv5t/' + name#[0:4+name.find('.whl')]#https://download.lfd.uci.edu/pythonlibs/s2jqpv5t/
print('00'+save_path)
count=0
v=0
for p in range(len(string)):
if string[p]=='\\':
if v==0:
string=string[:6]+'//'+string[7:]
else:
string=string[:p]+'/'+string[p+1:]
v+=1
if string[p-3:p]=='win':
string=string[:p-4]+'-'+string[p-3:]
if p<len(string):
if (string[p]=='\u2011')==True:
if p+1<len(string):
string=string[:p]+'-'+string[p+1:]
'''if string[p-2]>='0' and string[p-2]<='9' and string[p-1]>='0' and string[p-1]<='9':
if (string[p]>='a'and string[p]<='z') or (string[p]>='A'and string[p]<='Z'):
string=string[:p]+string[p+1:]'''
if p>=len(string):
break
'''if name[:9]=='ad3‑2.2.1':
print('aaa')
continue'''
conf={'url':string}
d=Downloader(conf)
d.start()
#file(string,save_path,name)
x=x+1
print('09'+name_list)
print('finished')
if __name__ == '__main__':
main()
报错:
======================== RESTART: E:\2345Downloads\44.py =======================
Warning: This project has moved to logzero (see https://github.com/metachris/logzero)
html done
['imagecodecs_lite‑2020.1.31‑py3‑none‑any.whl', 'imagecodecs_lite‑2019.12.3‑cp38‑cp38‑win_amd64.whl', 'imagecodecs_lite‑2019.12.3‑cp38‑cp38‑win32.whl', 'imagecodecs_lite‑2019.12.3‑cp37‑cp37m‑win_amd64.whl', 'imagecodecs_lite‑2019.12.3‑cp37‑cp37m‑win32.whl', 'imagecodecs_lite‑2019.12.3‑cp36‑cp36m‑win_amd64.whl', 'imagecodecs_lite‑2019.12.3‑cp36‑cp36m‑win32.whl', 'imagecodecs_lite‑2019.12.3‑cp35‑cp35m‑win_amd64.whl', 'imagecodecs_lite‑2019.12.3‑cp35‑cp35m‑win32.whl', 'imagecodecs_lite‑2019.12.3‑cp27‑cp27m‑win_amd64.whl', 'imagecodecs_lite‑2019.12.3‑cp27‑cp27m‑win32.whl', 'imagecodecs‑lite‑2019.12.3.tar.gz']
no:1
download imagecodecs_lite‑2020.1.31‑py3‑none‑any.whl
00E:\2345Downloads
Warning (from warnings module):
File "C:\Users\ASUS\AppData\Local\Programs\Python\Python38\lib\site-packages\conf\reader.py", line 39
warnings.warn('cannot parse files of type "%s"' % suffix)
UserWarning: cannot parse files of type ".whl"
{'Range': 'bytes=0-0', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
Traceback (most recent call last):
File "E:\2345Downloads\44.py", line 254, in start
self.__file=open(path,(exists(path) and 'rb+') or 'wb' )
FileNotFoundError: [Errno 2] No such file or directory: 'E:/tmp/imagecodecs_lite-2020.1.31-py3-none-any.whl'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "E:\2345Downloads\44.py", line 616, in
main()
File "E:\2345Downloads\44.py", line 606, in main
d.start()
File "E:\2345Downloads\44.py", line 259, in start
except: log.out('starting failed')
AttributeError: module 'log0' has no attribute 'out'
求高手解决