import java.io.IOException;
import java.net.MalformedURLException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

import com.gargoylesoftware.htmlunit.BrowserVersion;
import com.gargoylesoftware.htmlunit.FailingHttpStatusCodeException;
import com.gargoylesoftware.htmlunit.NicelyResynchronizingAjaxController;
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.HtmlPage;

// The input value type is assumed to be Text; adjust it to whatever the mapper actually emits.
public static class CrawlReducer
        extends TableReducer<Text, Text, NullWritable> {
    private String url = "";

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        this.url = key.toString();
        final WebClient webClient = new WebClient(BrowserVersion.CHROME);
        webClient.getOptions().setCssEnabled(false);
        webClient.getOptions().setJavaScriptEnabled(true);
        webClient.getOptions().setThrowExceptionOnScriptError(false);
        webClient.setAjaxController(new NicelyResynchronizingAjaxController());
        webClient.getOptions().setTimeout(5000);
        HtmlPage page = null;
        try {
            page = webClient.getPage(url); // this is the line where it fails
        } catch (FailingHttpStatusCodeException e1) {
            e1.printStackTrace();
        } catch (MalformedURLException e1) {
            e1.printStackTrace();
        } catch (IOException e1) {
            e1.printStackTrace();
        }
        if (page == null) {        // fetch failed: skip this URL instead of hitting an NPE below
            webClient.close();     // closeAllWindows() on older HtmlUnit releases
            return;
        }
        String s = page.asXml();
        webClient.close();
        // One Put per URL: the row key is the URL itself
        Put put = new Put(Bytes.toBytes(url));
        // Column family "content", qualifier "html", value is the rendered page source
        put.add(Bytes.toBytes("content"), Bytes.toBytes("html"),
                Bytes.toBytes(s));
        context.write(NullWritable.get(), put);
    }
}
The map side reads the URLs from HBase, and the page is fetched and parsed in the reducer. The job just stops as soon as it reaches getPage. I have no forum credits left to offer, but any help would be hugely appreciated.
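
For reference, here is a minimal sketch of the map side and job wiring that the description above implies: a TableMapper that reads each row key from an HBase table and hands it to CrawlReducer. The class names Crawl and CrawlMapper, the table names "urls" and "pages", and the assumption that the row key is the URL are placeholders of mine, not taken from the actual job.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class Crawl {
    // CrawlReducer from above would sit in this same outer class.

    // Hypothetical mapper: emits each row key (assumed to be the URL) as a Text key.
    public static class CrawlMapper extends TableMapper<Text, Text> {
        @Override
        protected void map(ImmutableBytesWritable row, Result columns, Context context)
                throws IOException, InterruptedException {
            String url = Bytes.toString(row.get(), row.getOffset(), row.getLength());
            context.write(new Text(url), new Text(""));
        }
    }

    // Hypothetical driver: "urls" is the source table, "pages" the table CrawlReducer writes to.
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "crawl");
        job.setJarByClass(Crawl.class);
        Scan scan = new Scan();
        scan.setCaching(100);        // fetch rows in batches
        scan.setCacheBlocks(false);  // do not pollute the region server block cache
        TableMapReduceUtil.initTableMapperJob("urls", scan, CrawlMapper.class,
                Text.class, Text.class, job);
        TableMapReduceUtil.initTableReducerJob("pages", CrawlReducer.class, job);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

Note that because CrawlReducer uses HtmlUnit inside the reduce task, the HtmlUnit jar and its transitive dependencies also have to reach the task classpath, for example via -libjars or by bundling them into the job jar.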