
[Java Learning] Sample code for handling connection timeouts in a Java web crawler

Posted on 2018-3-2 08:04:57

This post looks at connection timeouts in Java web crawlers; the details are as follows.

When writing a web crawler, you will often run into the error shown below, i.e. a connection timeout. The usual way to deal with it is to lengthen the connection timeout and the request (read) timeout, and, if a timeout still occurs, to re-issue the request up to a configured maximum number of retries.

Exception in thread "main" java.net.ConnectException: Connection timed out: connect
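For context, the core of the fix is just two timeout settings plus a retry handler. Here is that idea in isolation as a minimal sketch, written against the newer HttpClient 4.3+ RequestConfig API rather than the deprecated HttpParams API used in the full program below; the class name TimeoutSketch and the URL are placeholders of mine, not from the original post.

import java.io.IOException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public class TimeoutSketch {
    public static void main(String[] args) throws IOException {
        // Connect timeout and socket (read) timeout, in milliseconds
        RequestConfig config = RequestConfig.custom()
                .setConnectTimeout(6000)
                .setSocketTimeout(6000 * 20)
                .build();
        // Retry failed requests up to 5 times
        // (true = retry even if the request was already sent)
        CloseableHttpClient client = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .setRetryHandler(new DefaultHttpRequestRetryHandler(5, true))
                .build();
        try (CloseableHttpResponse response = client.execute(new HttpGet("http://example.com/"))) {
            System.out.println(response.getStatusLine().getStatusCode());
            System.out.println(EntityUtils.toString(response.getEntity()));
        } finally {
            client.close();
        }
    }
}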

The code below is a sample program that uses HttpClient to handle connection timeouts. Straight to the code.

package daili;

import java.io.IOException;
import java.net.URI;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.params.CookiePolicy;
import org.apache.http.client.protocol.ClientContext;
import org.apache.http.impl.client.BasicCookieStore;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;

/*
 * author: Qian Yang, School of Management, Hefei University of Technology
 * 1563178220@qq.com
 */
public class Test1 {
    public static void main(String[] args) throws ClientProtocolException, IOException, InterruptedException {
        getRawHTML("http://club.autohome.com.cn/bbs/forum-c-2098-1.html#pvareaid=103447");
    }

    public static String getRawHTML(String url) throws ClientProtocolException, IOException, InterruptedException {
        // Initialize the client (DefaultHttpClient is the legacy, pre-4.3 HttpClient API)
        DefaultHttpClient httpclient = new DefaultHttpClient();
        httpclient.getParams().setParameter("http.protocol.cookie-policy",
                CookiePolicy.BROWSER_COMPATIBILITY);
        // Timeout parameters
        HttpParams params = httpclient.getParams();
        // Connection timeout: 6 seconds
        HttpConnectionParams.setConnectionTimeout(params, 6000);
        // Socket (read) timeout: 120 seconds
        HttpConnectionParams.setSoTimeout(params, 6000 * 20);
        // Retry a failed request up to 5 times
        DefaultHttpRequestRetryHandler dhr = new DefaultHttpRequestRetryHandler(5, true);
        httpclient.setHttpRequestRetryHandler(dhr);
        HttpContext localContext = new BasicHttpContext();
        BasicCookieStore cookieStore = new BasicCookieStore();
        localContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);
        HttpGet request = new HttpGet();
        request.setURI(URI.create(url));
        // Content-Type and User-Agent are request headers, not cookies,
        // so they belong on the request itself
        request.setHeader("Content-Type", "text/html;charset=UTF-8");
        request.setHeader("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36");
        String rawHTML = "";
        HttpResponse response = httpclient.execute(request, localContext);
        // Get the response status code
        int statusCode = response.getStatusLine().getStatusCode();
        System.out.println(statusCode);
        if (statusCode == 200) {
            // 200 means the request succeeded: read the entity content
            rawHTML = EntityUtils.toString(response.getEntity());
            System.out.println(rawHTML);
            // Consume the entity to release the underlying connection
            EntityUtils.consume(response.getEntity());
        } else {
            // Close the HttpEntity stream by consuming the entity
            EntityUtils.consume(response.getEntity());
            // On failure, back off for 20 minutes before the next request
            Thread.sleep(20 * 60 * 1000);
        }
        httpclient.close();
        return rawHTML;
    }
}
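One caveat on the retry handler above: in HttpClient 4.x, DefaultHttpRequestRetryHandler treats java.net.ConnectException and InterruptedIOException (the superclass of the connect/read timeout exceptions) as non-retriable, so it will not by itself re-issue a request that failed with "Connection timed out". If you want timeouts retried as well, you can plug in a custom HttpRequestRetryHandler. A minimal sketch; the class name TimeoutRetryHandler is mine, not from the original post.

import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketTimeoutException;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpRequest;
import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.protocol.HttpContext;

// Retries connect/read timeouts, which DefaultHttpRequestRetryHandler skips by default
public class TimeoutRetryHandler implements HttpRequestRetryHandler {
    private final int maxRetries;

    public TimeoutRetryHandler(int maxRetries) {
        this.maxRetries = maxRetries;
    }

    @Override
    public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
        if (executionCount > maxRetries) {
            return false; // give up after maxRetries attempts
        }
        if (exception instanceof ConnectException
                || exception instanceof ConnectTimeoutException
                || exception instanceof SocketTimeoutException) {
            return true; // retry connection and read timeouts
        }
        // For anything else, only retry idempotent requests (those without a body)
        HttpRequest request = HttpClientContext.adapt(context).getRequest();
        return !(request instanceof HttpEntityEnclosingRequest);
    }
}

Wire it in with httpclient.setHttpRequestRetryHandler(new TimeoutRetryHandler(5)); in place of the DefaultHttpRequestRetryHandler used in the program above.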

Result: on success, the program prints the HTTP status code (200) followed by the raw HTML of the requested page.

Summary

That is all for this post on sample code for handling connection timeouts in a Java web crawler; I hope it is helpful. Interested readers can browse the other related topics on this site. If anything here is lacking, please point it out in a reply. Thanks, everyone, for supporting this site!


