The site I'm scraping has one main URL, and the links to the sub-pages in its source only differ in the trailing part, so I'm trying to work out how to join them back onto the main URL (see the urljoin sketch after the code). My own code is below; any pointers would be appreciated.
import os
import requests
from bs4 import BeautifulSoup
import time
url = "https://www.umei.cc/p/gaoqing/"
resp = requests.get(url)
resp.encoding = "utf-8"
# print(resp.text)
# hand the page source to BeautifulSoup
main_page = BeautifulSoup(resp.text, "html.parser")
alist = main_page.find("div", class_="TypeList").find_all("a")
# print(alist)
os.makedirs("img", exist_ok=True)  # make sure the output folder exists before writing into it
for a in alist:
    href = a.get('href')  # grab the attribute value directly with get
    # fetch the sub-page source
    child_page_resp = requests.get(href)
    child_page_resp.encoding = 'utf-8'
    child_page_text = child_page_resp.text
    # pull the image download link out of the sub-page
    child_page = BeautifulSoup(child_page_text, "html.parser")
    div = child_page.find("div", id="ArticleId")  # the keyword is id=, not id_=
    img = div.find("img")
    src = img.get("src")
    print(src)
    # download the image
    img_resp = requests.get(src)
    # img_resp.content is the raw bytes; writing those bytes to a file gives the image
    img_name = src.split("/")[-1]  # everything after the last / in the url
    with open("img/" + img_name, mode="wb") as f:
        f.write(img_resp.content)  # write the image bytes into the file
    print("over!", img_name)
    time.sleep(1)
print("all over")