liuzhen231 2016-06-07 01:11
Accepted

How do I write a Java program that crawls the content of a web page?


2 answers

  • 易生一世 2016-06-07 02:14
     package com.hanxb.crawler;
    
    import java.util.*;
    import java.net.*;
    import java.io.*;
    import java.util.regex.*;
    
    // A simple web crawler that searches pages for a given string.
    public class NativeCrawler implements Runnable {
    
        /*
         * disallowListCache caches the URLs that robots are not allowed to crawl.
         * The robots exclusion protocol places a robots.txt file in a site's root
         * directory, listing the paths on the site that crawlers must skip. A
         * crawler should skip those areas. An example robots.txt:
         *
         *   # robots.txt for http://somehost.com/
         *   User-agent: *
         *   Disallow: /cgi-bin/
         *   Disallow: /registration   # disallow robots on registration page
         *   Disallow: /login
         */
        private HashMap<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();
    
        ArrayList<String> errorList = new ArrayList<String>();// error messages
    
        ArrayList<String> result = new ArrayList<String>(); // URLs whose pages matched the search string
        public ArrayList<String> getResult() {
            return result;
        }
    
        String startUrl;// starting point of the crawl
        int maxUrl;// maximum number of URLs to process (-1 for no limit)
        String searchString;// the string to search for (English/ASCII)

        boolean caseSensitive = false;// whether matching is case-sensitive
        boolean limitHost = false;// whether to restrict the crawl to the start host
    
        public NativeCrawler(String startUrl, int maxUrl, String searchString) {
            this.startUrl = startUrl;
            this.maxUrl = maxUrl;
            this.searchString = searchString;
        }
    
        public void run() {// entry point of the search thread
            crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
        }
    
        // Performs the actual crawl.
        public ArrayList<String> crawl(String startUrl, int maxUrls,
                String searchString, boolean limithost, boolean caseSensitive) {
    
            System.out.println("searchString=" + searchString);
    
            // crawledList holds URLs already processed; toCrawlList is the
            // FIFO frontier of URLs still waiting to be crawled.
            HashSet<String> crawledList = new HashSet<String>();
            LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();
    
            // A maxUrls of -1 means "no limit"; otherwise it must be positive.
            if (maxUrls < 1 && maxUrls != -1) {
                errorList.add("Invalid Max URLs value.");
                System.out.println("Invalid Max URLs value.");
            }
    
            if (searchString.length() < 1) {
                errorList.add("Missing Search String.");
                System.out.println("Missing Search String.");
            }

            if (errorList.size() > 0) {
                System.out.println("Errors encountered; aborting crawl.");
                return errorList;
            }
    
            // Strip "www" from the start URL.
            startUrl = removeWwwFromUrl(startUrl);
    
            toCrawlList.add(startUrl);
            while (toCrawlList.size() > 0) {
    
                if (maxUrls != -1) {
                    if (crawledList.size() == maxUrls) {
                        break;
                    }
                }
    
                // Get the next URL from the head of the frontier (FIFO order).
                String url = toCrawlList.iterator().next();
    
                // Remove URL from the to crawl list.
                toCrawlList.remove(url);
    
                // Convert string url to URL object.
                URL verifiedUrl = verifyUrl(url);
                if (verifiedUrl == null) {
                    continue; // skip malformed or non-HTTP(S) URLs
                }

                // Skip URL if robots are not allowed to access it.
                if (!isRobotAllowed(verifiedUrl)) {
                    continue;
                }
    
                // Record this URL as processed.
                crawledList.add(url);
    
                String pageContents = downloadPage(verifiedUrl);
                if (pageContents != null && pageContents.length() > 0) {
                    // Extract the valid links from the page.
                    ArrayList<String> links = retrieveLinks(verifiedUrl,
                            pageContents, crawledList, limitHost);
    
                    toCrawlList.addAll(links);
    
                    if (searchStringMatches(pageContents, searchString,
                            caseSensitive)) {
                        result.add(url);
                        System.out.println(url);
                    }
                }
    
            }
            return result;
        }
    
        // Downloads the page at the given URL, returning its contents or
        // null if the download fails.
        private String downloadPage(URL pageUrl) {
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                    pageUrl.openStream()))) {
                // Read the page into a buffer, keeping line breaks so that
                // words on adjacent lines do not run together.
                String line;
                StringBuilder pageBuffer = new StringBuilder();
                while ((line = reader.readLine()) != null) {
                    pageBuffer.append(line).append('\n');
                }
                return pageBuffer.toString();
            } catch (IOException e) {
                System.out.println("Failed to download " + pageUrl + ": " + e);
            }
            return null;
        }
    
        // Checks whether the page contents contain every term of the search string.
        private boolean searchStringMatches(String pageContents,
                String searchString, boolean caseSensitive) {
            String searchContents = pageContents;
            if (!caseSensitive) {// case-insensitive: compare in lower case
                searchContents = pageContents.toLowerCase();
            }
    
            Pattern p = Pattern.compile("[\\s]+");
            String[] terms = p.split(searchString);
            for (int i = 0; i < terms.length; i++) {
                if (caseSensitive) {
                    if (searchContents.indexOf(terms[i]) == -1) {
                        return false;
                    }
                } else {
                    if (searchContents.indexOf(terms[i].toLowerCase()) == -1) {
                        return false;
                    }
                }
            }
    
            return true;
        }
    
        // Validates the URL format.
        private URL verifyUrl(String url) {
            // Only handle HTTP and HTTPS URLs (the original https-only check
            // rejected the http:// links built in retrieveLinks).
            String lowerUrl = url.toLowerCase();
            if (!lowerUrl.startsWith("http://") && !lowerUrl.startsWith("https://"))
                return null;

            try {
                return new URL(url);
            } catch (MalformedURLException e) {
                return null;
            }
        }
    
        // Determines whether robots are allowed to access the given URL.
        private boolean isRobotAllowed(URL urlToCheck) {
            String host = urlToCheck.getHost().toLowerCase();// host of the given URL

            // Get the cached list of paths this host disallows.
            ArrayList<String> disallowList = disallowListCache.get(host);

            // Not cached yet: download and cache it.
            if (disallowList == null) {
                disallowList = new ArrayList<String>();
                // Fetch /robots.txt over the same scheme as the URL being checked.
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(new URL(urlToCheck.getProtocol()
                                + "://" + host + "/robots.txt").openStream()))) {

                    // Read the robots file and build the list of disallowed paths.
                    String line;
                    while ((line = reader.readLine()) != null) {
                        if (line.startsWith("Disallow:")) {
                            String disallowPath = line.substring("Disallow:"
                                    .length());// the disallowed path

                            // Strip any trailing comment.
                            int commentIndex = disallowPath.indexOf("#");
                            if (commentIndex != -1) {
                                disallowPath = disallowPath.substring(0,
                                        commentIndex);
                            }

                            disallowPath = disallowPath.trim();
                            // An empty "Disallow:" means everything is allowed,
                            // so record only non-empty paths.
                            if (disallowPath.length() > 0) {
                                disallowList.add(disallowPath);
                            }
                        }
                    }

                    // Cache this host's list of disallowed paths.
                    disallowListCache.put(host, disallowList);
                } catch (Exception e) {
                    // No robots.txt at the site root (or it could not be read):
                    // assume access is allowed.
                    return true;
                }
            }
    
            // Disallow the URL if its path starts with any disallowed path.
            String file = urlToCheck.getFile();
            for (int i = 0; i < disallowList.size(); i++) {
                String disallow = disallowList.get(i);
                if (file.startsWith(disallow)) {
                    return false;
                }
            }
    
            return true;
        }
    
        // Strips "www" from a URL.
        private String removeWwwFromUrl(String url) {
            int index = url.indexOf("://www.");
            if (index != -1) {
                return url.substring(0, index + 3) + url.substring(index + 7);
            }
    
            return url;
        }
    
        // Parses the page contents and extracts the links it contains.
        private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents,
                HashSet<String> crawledList, boolean limitHost) {
            // Compile the link-matching pattern: the href value ends at a
            // closing quote or at the end of the tag.
            Pattern p = Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\">]",
                    Pattern.CASE_INSENSITIVE);
            Matcher m = p.matcher(pageContents);
    
            ArrayList<String> linkList = new ArrayList<String>();
            while (m.find()) {
                String link = m.group(1).trim();
    
                if (link.length() < 1) {
                    continue;
                }
    
                // Skip links that point within the same page.
                if (link.charAt(0) == '#') {
                    continue;
                }
    
                if (link.indexOf("mailto:") != -1) {
                    continue;
                }
    
                if (link.toLowerCase().indexOf("javascript") != -1) {
                    continue;
                }
    
                if (link.indexOf("://") == -1) {
                    // Build the base "scheme://host[:port]" from the page URL;
                    // getPort() returns -1 when no explicit port is present.
                    int port = pageUrl.getPort();
                    String base = pageUrl.getProtocol() + "://"
                            + pageUrl.getHost() + (port == -1 ? "" : ":" + port);
                    if (link.charAt(0) == '/') {// handle absolute paths
                        link = base + link;
                    } else {// handle relative paths
                        String file = pageUrl.getFile();
                        if (file.indexOf('/') == -1) {
                            link = base + "/" + link;
                        } else {
                            String path = file.substring(0,
                                    file.lastIndexOf('/') + 1);
                            link = base + path + link;
                        }
                    }
                }
    
                int index = link.indexOf('#');
                if (index != -1) {
                    link = link.substring(0, index);
                }
    
                link = removeWwwFromUrl(link);
    
                URL verifiedLink = verifyUrl(link);
                if (verifiedLink == null) {
                    continue;
                }
    
                /* If limited to one host, exclude URLs on other hosts. */
                if (limitHost
                        && !pageUrl.getHost().toLowerCase()
                                .equals(verifiedLink.getHost().toLowerCase())) {
                    continue;
                }
    
                // Skip links that have already been crawled.
                if (crawledList.contains(link)) {
                    continue;
                }
    
                linkList.add(link);
            }
    
            return (linkList);
        }
    
        // Entry point.
        public static void main(String[] args) {
            if (args.length != 3) {
                System.out
                        .println("Usage: java NativeCrawler startUrl maxUrl searchString");
                return;
            }
            int max = Integer.parseInt(args[1]);
            NativeCrawler crawler = new NativeCrawler(args[0], max, args[2]);
            Thread search = new Thread(crawler);
            System.out.println("Start searching...");
            System.out.println("result:");
            search.start();
        }
    }
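
    For reference, the class can be compiled and run from the command line roughly as follows; the start URL, URL limit, and search term shown are placeholders:

        javac com/hanxb/crawler/NativeCrawler.java
        java -cp . com.hanxb.crawler.NativeCrawler https://example.com 50 java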
    
    This answer was accepted by the asker.
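
If the goal is only to fetch the content of a single page, as the question asks, a full crawler is not required. Below is a minimal sketch using the same java.net APIs as the accepted answer; the class name FetchPage and the example URL are placeholders:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;

    public class FetchPage {
        public static void main(String[] args) throws Exception {
            // Placeholder URL: replace with the page to fetch.
            URL url = new URL("https://example.com/");
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(url.openStream(), "UTF-8"))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line); // print the raw HTML
                }
            }
        }
    }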