用户昵称真的存在吗 2021-04-09 23:03
Accepted

How to make a function return values from inside a for loop

How can I make a function return the values produced inside a for loop?

In other words, how can I get the new_links function to return the contents that print(url_111) prints, so that other functions can use them?

This is what I have written so far. print(url_111) prints exactly the result I want, but the url_111 that return gives back is empty... Why is that?

from lxml import etree
import requests
import os
import threading
import queue
from urllib import parse
import time

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36',
    'cookie':'HMACCOUNT_BFESS=5A76C20AA8660D57; BDUSS_BFESS=UwZTVlQ0oxdTNNcVRiUUp3YkZ2YlpTWEtMN2tNSFpFeWdRUlRjRDlxZTBobDFmRVFBQUFBJCQAAAAAAAAAAAEAAAD8Ar-O70yDuv2XAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALT5NV-0-TVfYj; BDSFRCVID_BFESS=dPCOJeC62xqMEQoeDjW8TGYSv07fn5jTH6aocB2aoVUNToy-YSWMEG0PDM8g0Kubo25nogKKBeOTH6KF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF_BFESS=tR-JoDDMJDL3qPTuKITaKDCShUFsKpOmB2Q-5KL-MPjYsfjvbfO83Tk7Qnrg-j8f5D_tBfbdJJjoHp_4-tn43fC7hpuLXlJUBmTxoUJgBCnJhhvG-4PKjtCebPRi3tQ9Qg-qahQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0HPonHj8he5bP; BAIDUID_BFESS=7C613C37D1AFB222B605AA8CCBB44540:FG=1',
}


# Use a proxy
proxy = {
    'http':'113.194.143.101:9999'
}


# Fetch the night-scene wallpaper list and return the URL of each photo set
def yejing_pic_list(base_url):
    res = requests.get(base_url,proxies=proxy,headers=headers).content.decode('gbk')
    html = etree.HTML(res)
    Incomplete_url = html.xpath('//li[@class="photo-list-padding"]/a/@href') # returns ['/bizhi/8139_101249_2.html', '/bizhi/7947_98766_2.html', '/bizhi/7741_96199_2.html', '/bizhi/7568_93902_2.html', '/bizhi/7203_89142_2.html', '/bizhi/7000_86950_2.html', '/bizhi/6704_83622_2.html', '/bizhi/6383_78552_2.html', '/bizhi/6062_75033_2.html']
    s = 'https://desk.zol.com.cn'
    global name
    name = html.xpath('//li[@class="photo-list-padding"]/a/span/em/text()')  # returns ['繁华的都市唯美夜景壁纸', '城市图片-城市夜景壁纸图', '瑰丽的城市夜景壁纸', '都市夜景犹如漫画游戏般梦', '古城夜景桌面壁纸', '厦门夜景桌面壁纸', '2016年Bing夜景主题桌面壁', '高空视角城市夜景桌面壁纸', '唯美夜景图片-唯美夜景图']
    # build the full URL:  s + Incomplete_url
    for i in range(len(Incomplete_url)):
        Incomplete_url[i] = s + Incomplete_url[i]
    urls_list = Incomplete_url                                                    # returns ['https://desk.zol.com.cn/bizhi/8139_101249_2.html', 'https://desk.zol.com.cn/bizhi/7947_98766_2.html', 'https://desk.zol.com.cn/bizhi/7741_96199_2.html', 'https://desk.zol.com.cn/bizhi/7568_93902_2.html', 'https://desk.zol.com.cn/bizhi/7203_89142_2.html', 'https://desk.zol.com.cn/bizhi/7000_86950_2.html', 'https://desk.zol.com.cn/bizhi/6704_83622_2.html', 'https://desk.zol.com.cn/bizhi/6383_78552_2.html', 'https://desk.zol.com.cn/bizhi/6062_75033_2.html']
    return name,urls_list

# Return the page link of every image in each photo set
def secande(urls_list):
    list_1 = []
    for x in urls_list:
        resp = requests.get(x,headers=headers).content.decode('gbk')
        html = etree.HTML(resp)
        links = html.xpath('//div[@class="photo-list-box"]/ul//li/a/@href')
        s = 'https://desk.zol.com.cn'                                      
        url_links = [s+i for i in links]
        list_1.append(url_links)
    return list_1

# Return the real (full-resolution) link of every image
def new_links(list_1):
    for i in list_1:
        print('='*220)
        url_111 = {}
        for index,y in enumerate(i):
            resp = requests.get(y,headers=headers).content.decode('gbk')
            html_1 = etree.HTML(resp)
            adress = html_1.xpath('//div[@id="mouscroll"]/img/@src')[0].replace('t_s960x600c5','t_s1920x1080')
            url_111[f'img_No.{index+1}'] = adress
        print(url_111)
       
    return url_111

# Download the images
# def downlond(path,url_111):
#     print(url_111)
#     for img_links in url_111.items():
#         index,img_links = img_links
#         # print(url_111)
#         with open(os.path.join(path, f'{index}.jpg'),'wb') as fp:
#             fp.write(requests.get(img_links,headers=headers).content)  

# Create the directory path
# def dir_path(name):
#     for index,n in enumerate(name):
#         path = os.path.join('zol高清壁纸',f'{index+1}'+'_'+n)
#         if not os.path.exists(path):
#             os.mkdir(path)
#         return path           

def main():
    base_url = 'https://desk.zol.com.cn/fengjing/yejing/'
    name,urls_list = yejing_pic_list(base_url)
    list_1 = secande(urls_list)
    url_111 = new_links(list_1)
    # path = dir_path(name)
    # downlond(path,url_111)

if __name__=='__main__':
    main()

This is the printed output, which is what I want, but when this dict is passed into the image-download function, the url_111 that comes in is empty.


2 answers

  • CSDN专家-HGJ 2021-04-09 23:57

    To collect aggregated data from a for loop, define a list outside the loop and append to it inside the loop body. Changing the new_links function in your code as follows is enough.

    def new_links(list_1):
        urls=[]  # accumulator defined before the loops, so results from every iteration are kept
        for i in list_1:
            #print('='*220)
            for index,y in enumerate(i):
                resp = requests.get(y,headers=headers).content.decode('gbk')
                html_1 = etree.HTML(resp)
                adress = html_1.xpath('//div[@id="mouscroll"]/img/@src')[0].replace('t_s960x600c5','t_s1920x1080')
                urls.append({f'img_No.{index+1}':adress})
           
        return urls
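
    As an illustration of how the list returned by the fixed new_links might then be consumed, here is a minimal download sketch. The download_all helper and the save_dir parameter are hypothetical names introduced only for illustration, and the sketch assumes the headers dict from the question is in scope.

    import os
    import requests

    def download_all(url_dicts, save_dir, headers):
        # url_dicts is the list of {'img_No.x': link} dicts returned by new_links
        os.makedirs(save_dir, exist_ok=True)
        for pos, d in enumerate(url_dicts, start=1):
            for key, img_link in d.items():
                # prefix the file name with the running position so repeated
                # keys (img_No.1, img_No.2, ...) do not overwrite each other
                file_name = f'{pos}_{key}.jpg'
                with open(os.path.join(save_dir, file_name), 'wb') as fp:
                    fp.write(requests.get(img_link, headers=headers).content)

    # usage inside main(), assuming urls = new_links(list_1):
    # download_all(urls, 'zol高清壁纸', headers)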
    This answer was selected by the asker as the best answer.
