# NOTE: When tested in isolation the extraction works, but wired together it keeps
# failing — the async part seems to be the problem; I've changed it back and forth
# with no luck. Also, on this site some videos are wrapped in an iframe and some are
# not (and it changes over time). Started as practice, feels like a rabbit hole now.
from lib2to3.pgen2.grammar import line
import requests
from bs4 import BeautifulSoup
import re
import asyncio
import aiohttp
import aiofiles
import os

# --- Stage 1 (synchronous): locate and save the first-level m3u8 playlist ---
url = "https://www.hanjutv2020.com/player/57587.html"
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
}

# Fetch the player page. Bug fix: the original called requests.get(url) without
# the browser user-agent it had just defined, which such sites commonly block.
resp = requests.get(url, headers=headers)
m_p = BeautifulSoup(resp.text, "html.parser")
resp.close()

# The first-level m3u8 url rides in the iframe's src query string
# (e.g. "...player.html?url=<m3u8>"); everything after the first '=' up to
# the next '=' is the playlist url fragment the rest of the script expects.
# NOTE(review): some videos on this site have no iframe at all — this will
# raise AttributeError on those pages; handle/guard before productionizing.
src = m_p.find("iframe").get("src")
first_m3u8_url = src.split("=")[1]

# Download the playlist and save it for the async stage to read back.
# Robustness: make sure the output directory exists before opening the file.
os.makedirs('./music', exist_ok=True)
music = requests.get(url=first_m3u8_url, headers=headers)
with open('./music/视频6.txt', 'wb') as file:
    file.write(music.content)
music.close()

# Strip the (hard-coded) playlist filename off the url to get the base url
# that each relative .ts segment name is appended to.
url6 = first_m3u8_url.split("55555555rpn97s4955568265772q3434_eb242a87122f4219955ec6e9d80aa5db_0_3.m3u8&f")[0]
print(url6)  # success: base segment url resolved
async def aio_download(up_url):
    """Concurrently download every .ts segment listed in the saved playlist.

    Reads the m3u8 playlist saved by the synchronous stage, schedules one
    download task per segment line, and waits for all of them to finish.

    up_url: base url that each (relative) segment filename is appended to.

    Bug fixes vs. the original:
      * the original called asyncio.run(aio_download(url6)) on its own first
        line — infinite recursion; the driver call belongs at module level
        (run with: asyncio.run(aio_download(url6)) after download_ts is defined)
      * asyncio.create_task() was called with no coroutine (TypeError), and
        'tasks.asyncio.create_task(...)' was not valid at all
      * download_ts was scheduled without the session it requires
      * asyncio.wait was awaited on a single task instead of the task list
      * the playlist was read from "视频6.txt" although stage 1 saved it to
        "./music/视频6.txt"
    """
    tasks = []
    # One shared session for every segment download.
    async with aiohttp.ClientSession() as session:
        async with aiofiles.open("./music/视频6.txt", mode="r", encoding='utf-8') as f:
            async for line in f:
                # m3u8 metadata/comment lines start with '#'; skip them.
                if line.startswith("#"):
                    continue
                line = line.strip()
                ts_url = up_url + line
                tasks.append(asyncio.create_task(download_ts(ts_url, line, session)))
            # Wait inside the session context so connections stay open while
            # downloads run. asyncio.wait raises on an empty set — guard it.
            if tasks:
                await asyncio.wait(tasks)
async def download_ts(url, name, session):
    """Download one .ts segment and save it under ./music/.

    url: absolute url of the segment.
    name: segment filename taken from the playlist line; used as the output
          filename. Bug fix: the original ignored this parameter and wrote
          every segment to the same fixed file ("./music/视频5.ts"), so each
          download clobbered the previous one.
    session: shared aiohttp.ClientSession owned by the caller.
    """
    async with session.get(url) as resp:
        # One output file per segment, named after the playlist entry.
        async with aiofiles.open(f"./music/{name}", mode="wb") as f:
            await f.write(await resp.content.read())
    print("下载完毕")