m0_59583499
2021-06-22 18:43
Acceptance rate: 0%
432 views
Closed

Sogou search result link encryption and decryption

Asking the experts here for help. Budget is 1k, with 500 paid up front.

For example:

https://www.sogou.com/link?url=DSOYnZeCC_oLiDJQIDpHwgDydjxbYhws

I want to know how the string after link= is encrypted and decrypted.

That URL is what you get from searching for http://www.baidu.com on Sogou.

Search for any term on Sogou and the results carry an encrypted link=xxxxx string.

If you know how, leave your WeChat or send a private message. Looking for the method. Very well paid!!!


9 replies

  • qq_15769939
    小P聊技术 2021-06-23 20:10
    import requests
    from lxml import etree
    import re
    import random
    import json
    from urllib import parse
    
    
    def get_cookie(response1, uigs_para, UserAgent):
        # Walk Sogou's cookie chain: ABTEST/SNUID/IPLOC/SUID come from the search
        # page; three follow-up requests then yield a fresh SUID, a JSESSIONID,
        # and finally a SUV from the pv.gif tracker.
        SetCookie = response1.headers['Set-Cookie']
        cookie_params = {
            "ABTEST": re.findall('ABTEST=(.*?);', SetCookie, re.S)[0],
            "SNUID": re.findall('SNUID=(.*?);', SetCookie, re.S)[0],
            "IPLOC": re.findall('IPLOC=(.*?);', SetCookie, re.S)[0],
            "SUID": re.findall('SUID=(.*?);', SetCookie, re.S)[0]
        }
    
        url = "https://www.sogou.com/sug/css/m3.min.v.7.css"
        headers = {
            "Accept": "text/css,*/*;q=0.1",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Connection": "keep-alive",
            "Cookie": "SNUID={}; IPLOC={}".format(cookie_params['SNUID'], cookie_params['IPLOC']),
            "Host": "www.sogou.com",
            "Referer": "https://weixin.sogou.com/",
            "User-Agent": UserAgent
        }
        response2 = requests.get(url, headers=headers)
        SetCookie = response2.headers['Set-Cookie']
        cookie_params['SUID'] = re.findall('SUID=(.*?);', SetCookie, re.S)[0]
    
        url = "https://weixin.sogou.com/websearch/wexinurlenc_sogou_profile.jsp"
        headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Connection": "keep-alive",
            "Cookie": "ABTEST={}; SNUID={}; IPLOC={}; SUID={}".format(cookie_params['ABTEST'], cookie_params['SNUID'],
                                                                      cookie_params['IPLOC'],
                                                                      cookie_params['SUID']),
            "Host": "weixin.sogou.com",
            "Referer": response1.url,
            "User-Agent": UserAgent
        }
        response3 = requests.get(url, headers=headers)
        SetCookie = response3.headers['Set-Cookie']
        cookie_params['JSESSIONID'] = re.findall('JSESSIONID=(.*?);', SetCookie, re.S)[0]
    
        url = "https://pb.sogou.com/pv.gif"
        headers = {
            "Accept": "image/webp,*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Connection": "keep-alive",
            "Cookie": "SNUID={}; IPLOC={}; SUID={}".format(cookie_params['SNUID'], cookie_params['IPLOC'],
                                                           cookie_params['SUID']),
            "Host": "pb.sogou.com",
            "Referer": "https://weixin.sogou.com/",
            "User-Agent": UserAgent
        }
        response4 = requests.get(url, headers=headers, params=uigs_para)
        SetCookie = response4.headers['Set-Cookie']
        cookie_params['SUV'] = re.findall('SUV=(.*?);', SetCookie, re.S)[0]
    
        return cookie_params
    
    
    def get_k_h(url):
        # Append the anti-crawl params k and h: k is a random integer in 1..100,
        # and h is the single URL character at offset (index of "url=") + 4 + 21 + k,
        # mirroring the check done by Sogou's front-end JS.
        b = int(random.random() * 100) + 1
        a = url.find("url=")
        url = url + "&k=" + str(b) + "&h=" + url[a + 4 + 21 + b: a + 4 + 21 + b + 1]
        return url
    
    
    def get_uigs_para(response):
        # Extract the page-view tracking object (var uigs_para = {...}) embedded
        # in the list page's inline JS; pv.gif expects it when issuing the SUV cookie.
        uigs_para = re.findall('var uigs_para = (.*?);', response.text, re.S)[0]
        if 'passportUserId ? "1" : "0"' in uigs_para:
            uigs_para = uigs_para.replace('passportUserId ? "1" : "0"', '0')
        uigs_para = json.loads(uigs_para)
        exp_id = re.findall('uigs_para.exp_id = "(.*?)";', response.text, re.S)[0]
        uigs_para['right'] = 'right0_0'
        uigs_para['exp_id'] = exp_id[:-1]  # drop the trailing separator character
        return uigs_para
    
    
    def main_v4(list_url, UserAgent):
        # Fetch the search list page, collect the cookie set, confirm the visit
        # via /approve, then resolve each anti-crawl item link to its article.
        headers1 = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Host": "weixin.sogou.com",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": UserAgent,
        }
        response1 = requests.get(list_url, headers=headers1)
        html = etree.HTML(response1.text)
        urls = ['https://weixin.sogou.com' + i for i in html.xpath('//div[@class="img-box"]/a/@href')]
    
        uigs_para = get_uigs_para(response1)
        params = get_cookie(response1, uigs_para, UserAgent)
        approve_url = 'https://weixin.sogou.com/approve?uuid={}'.format(uigs_para['uuid'])
        headers2 = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Connection": "keep-alive",
            "Cookie": "ABTEST={}; IPLOC={}; SUID={}; SUV={}; SNUID={}; JSESSIONID={};".format(params['ABTEST'],
                                                                                              params['IPLOC'],
                                                                                              params['SUID'], params['SUV'],
                                                                                              params['SNUID'],
                                                                                              params['JSESSIONID']),
            "Host": "weixin.sogou.com",
            "Referer": response1.url,
            "User-Agent": UserAgent,
            "X-Requested-With": "XMLHttpRequest"
        }
        for url in urls:
            # Ping /approve before each item to register the visit (response unused).
            response2 = requests.get(approve_url, headers=headers2)
            url = get_k_h(url)
            headers3 = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
                "Connection": "keep-alive",
                "Cookie": "ABTEST={}; SNUID={}; IPLOC={}; SUID={}; JSESSIONID={}; SUV={}".format(params['ABTEST'],
                                                                                                 params['SNUID'],
                                                                                                 params['IPLOC'],
                                                                                                 params['SUID'],
                                                                                                 params['JSESSIONID'],
                                                                                                 params['SUV']),
                "Host": "weixin.sogou.com",
                "Referer": list_url,
                "Upgrade-Insecure-Requests": "1",
                "User-Agent": UserAgent
            }
            response3 = requests.get(url, headers=headers3)
    
            # The redirect page assembles the real URL in JS via url += '...';
            # stitch the fragments back together (raw string avoids the bad \+ escape).
            fragments = re.findall(r"url \+= '(.*?)'", response3.text, re.S)
            itemurl = ''.join(fragments)
    
            # Fetch the article body from the resolved URL
            headers4 = {
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
                "cache-control": "max-age=0",
                "user-agent": UserAgent
            }
            response4 = requests.get(itemurl, headers=headers4)
            html = etree.HTML(response4.text)
            print(response4.status_code)
            print(html.xpath('//meta[@property="og:title"]/@content')[0])
    
    
    if __name__ == "__main__":
        key = "咸蛋超人"
        url = 'https://weixin.sogou.com/weixin?type=2&s_from=input&query={}&_sug_=n&_sug_type_=&page=1'.format(
            parse.quote(key))
        UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0"
        main_v4(url, UserAgent)
    

  • technologist_32
    CSDN专家-Time 2021-06-23 16:28

    That's the link for a result entry; the back end then handles the redirect.

  • ledrsnet
    清枫cc 2021-06-22 19:49

    By the look of it this isn't encoded on the front end; the back end encodes it and returns the whole page to the client.

    Unless you're on the inside, it's very hard to work out how it's encrypted. But if what you want is the decrypted result, just send a GET request to the link: Sogou's back end returns a script that performs a local redirect, and the concrete decrypted URL is right there in it.
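
    A minimal sketch of that approach, assuming the response is either a plain HTTP redirect or a page whose script calls window.location.replace("..."); the exact format is Sogou's to change at any time:

    import re
    import requests


    def resolve_sogou_link(link_url):
        # GET an encrypted www.sogou.com/link?url=... address and pull the real
        # target out of whatever redirect Sogou sends back.
        headers = {
            # Browser-like UA; the default requests UA may be blocked.
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
            "Referer": "https://www.sogou.com/",
        }
        resp = requests.get(link_url, headers=headers, allow_redirects=False)
        # Case 1: an HTTP redirect; the Location header is the answer.
        if resp.status_code in (301, 302) and "Location" in resp.headers:
            return resp.headers["Location"]
        # Case 2 (assumed): a JS redirect such as window.location.replace("http://...")
        m = re.search(r'window\.location\.replace\("(.*?)"\)', resp.text)
        if m:
            return m.group(1)
        raise ValueError("no redirect target found; the page format may have changed")


    # print(resolve_sogou_link(
    #     "https://www.sogou.com/link?url=DSOYnZeCC_oLiDJQIDpHwgDydjxbYhws"))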

  • u010290208
    燕_青 2021-06-23 15:29

    Feels similar to a short link: Sogou encodes and encrypts the search term, something like the toy model below.
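
    Purely for intuition, a toy version of that model (the names TOKEN_STORE and make_link are invented): the token is an opaque key into a server-side table, so there is nothing an outsider can "decrypt":

    import secrets

    # Server-side table: opaque token -> real URL. The token itself encodes
    # nothing about the target.
    TOKEN_STORE = {}


    def make_link(real_url):
        token = secrets.token_urlsafe(24)  # e.g. 'DSOYnZeCC_oLiDJQ...'
        TOKEN_STORE[token] = real_url
        return "https://example.com/link?url=" + token


    def resolve(token):
        # Only the server can perform this step; everyone else has to ask it.
        return TOKEN_STORE[token]


    link = make_link("http://www.baidu.com")
    print(link)
    print(resolve(link.split("url=")[1]))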

  • Feng_wwf
    NDSC专家-王文峯 2021-06-23 16:16

    Hoping you'll accept this answer, thanks.

    An algorithm encrypts the string that follows link= in the search URL, so it has to be decrypted.

  • sdhexu
    sdhexu 2021-06-23 16:47

    This is a GET request parameter. In principle the encryption algorithm is the site's own; Baidu, Sogou, and the other big sites all differ. Whether it's encrypted on the front end or the back end can only be known after concrete analysis. Open F12 and look for any relevant front-end JS; if it exists, just download it and call it directly, as sketched below, with no need to study the algorithm. If it doesn't, the encryption is probably server-side, and that's much harder: you'd need a lot of data to test and verify against...
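
    One common way to "call it directly" from Python is PyExecJS; a sketch, where encrypt.js and the function name encodeUrl are placeholders for whatever actually turns up in DevTools:

    import execjs  # pip install PyExecJS; requires Node.js or another JS runtime

    # Load the JS file saved from the site via F12 -> Sources.
    with open("encrypt.js", encoding="utf-8") as f:
        ctx = execjs.compile(f.read())

    # Run the site's own routine instead of re-implementing it.
    # "encodeUrl" is a hypothetical name - use whatever the JS defines.
    encrypted = ctx.call("encodeUrl", "http://www.baidu.com")
    print(encrypted)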

  • qq_15769939
    小P聊技术 2021-06-23 16:50

    Are you trying to scrape the data behind these URLs, i.e. get the content at the real redirect target? If so, message me privately and I'll show you something.

  • tw949561391
    TristanWong 2021-06-24 14:37

    Don't set your mind on cracking this encryption. Even if you break it this time, Sogou can change the scheme whenever it likes, and then you're locked out again. Change your approach instead: make Sogou do the encrypting and decrypting for you. All you do is simulate the endpoint in code, and the content Sogou returns tells you the mapping between encrypted and decrypted forms. It isn't complicated to implement; I can give you the idea if you need it, and there's a sketch below.
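
    A sketch of that idea, under the assumption that Sogou answers the link with an HTTP redirect (if it responds with a JS redirect instead, parse the returned script as in the earlier sketch). The endpoint acts as the decryption oracle, and the cache means a scheme change on Sogou's side only ever costs a re-fetch, never a re-crack:

    import requests

    _cache = {}  # encrypted link -> decrypted target, supplied by Sogou itself

    session = requests.Session()
    session.headers.update({
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
        "Referer": "https://www.sogou.com/",
    })


    def decrypt_via_sogou(link_url):
        # Follow whatever redirect Sogou issues; the final URL is the decrypted
        # target, no matter how link= was generated.
        if link_url not in _cache:
            resp = session.get(link_url, allow_redirects=True, timeout=10)
            _cache[link_url] = resp.url
        return _cache[link_url]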

