自己写的代码无法读取某网站的rss,用浏览器却可以打开,其他网站rss都是正常的
尝试读取tccf的rss
地址为:http://et8.org/torrentrss.php?rows=10&ismalldescr=1
试过用HttpWebRequest,webclient,xmldocument.load方式,统统显示超时,但是用浏览器打开一切正常,将地址放到下载器中也能下载到正确的torrentrss.xml
用xmldocument.load读取CHD,TTG的rss都是正常读取的。
测试用的HttpWebRequest代码如下
/// <summary>
/// Downloads a web page (or RSS feed) and returns its body as a string,
/// with every line terminated by "\r\n".
/// </summary>
/// <param name="pageUrl">Absolute URL of the page to fetch.</param>
/// <param name="encoding">Text encoding used to decode the response stream.</param>
/// <param name="requestMethod">HTTP method, e.g. "GET".</param>
/// <param name="timeOut">Request timeout in milliseconds; pass -1 to keep the framework default.</param>
/// <returns>The page text, or string.Empty when a WebException occurred.</returns>
public static string DownloadHtmlPage(string pageUrl, Encoding encoding, string requestMethod, int timeOut)
{
    string value = string.Empty;
    try
    {
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(pageUrl);
        request.Method = requestMethod;
        // Fix for the timeout reported in this thread: some sites (e.g. et8.org)
        // silently drop requests that carry no User-Agent header, which surfaces
        // as a timeout even though a browser can open the same URL. Sending a
        // browser-like UA makes the request succeed.
        request.UserAgent = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11";
        if (timeOut != -1) request.Timeout = timeOut;

        // using-statements guarantee disposal on every path (including
        // mid-read exceptions), replacing the manual finally/Close chain.
        using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
        using (Stream data = response.GetResponseStream())
        using (StreamReader sr = new StreamReader(data, encoding))
        {
            StringBuilder source = new StringBuilder();
            string str;
            // Re-terminate every line with "\r\n" so the output is
            // normalized regardless of the server's line endings.
            while ((str = sr.ReadLine()) != null)
                source.Append(str).Append("\r\n");
            value = source.ToString();
        }
    }
    catch (WebException e)
    {
        // Preserved from the original: surface network failures to the user
        // and return string.Empty instead of rethrowing.
        MessageBox.Show(e.Message);
    }
    return value;
}
请问是否是对方做了什么限制的原因还是两个网站有什么不同?
------解决方案--------------------
本机测试过下面的代码可以正常获取
尝试读取tccf的rss
地址为:http://et8.org/torrentrss.php?rows=10&ismalldescr=1
试过用HttpWebRequest,webclient,xmldocument.load方式,统统显示超时,但是用浏览器打开一切正常,将地址放到下载器中也能下载到正确的torrentrss.xml
用xmldocument.load读取CHD,TTG的rss都是正常读取的。
测试用的HttpWebRequest代码如下
/// <summary>
/// Fetches the page at <paramref name="pageUrl"/> and returns its body as text,
/// re-joining the lines with "\r\n".
/// </summary>
/// <param name="pageUrl">Absolute URL to request.</param>
/// <param name="encoding">Encoding used to decode the response bytes.</param>
/// <param name="requestMethod">HTTP verb to send, e.g. "GET".</param>
/// <param name="timeOut">Timeout in milliseconds; -1 leaves the default untouched.</param>
/// <returns>The decoded page text, or string.Empty if a WebException was raised.</returns>
public static string DownloadHtmlPage(string pageUrl, Encoding encoding, string requestMethod, int timeOut)
{
    string result = string.Empty;
    HttpWebResponse webResponse = null;
    Stream responseStream = null;
    StreamReader reader = null;
    try
    {
        HttpWebRequest webRequest = (HttpWebRequest)HttpWebRequest.Create(pageUrl);
        webRequest.Method = requestMethod;
        if (timeOut != -1)
        {
            webRequest.Timeout = timeOut;
        }
        webResponse = (HttpWebResponse)webRequest.GetResponse();
        responseStream = webResponse.GetResponseStream();
        reader = new StreamReader(responseStream, encoding);

        // Read line by line and normalize the terminators to "\r\n".
        StringBuilder buffer = new StringBuilder();
        for (string line = reader.ReadLine(); line != null; line = reader.ReadLine())
        {
            buffer.Append(line).Append("\r\n");
        }
        result = buffer.ToString();
    }
    catch (WebException e)
    {
        // Show network errors to the user; the method then returns string.Empty.
        MessageBox.Show(e.Message);
    }
    finally
    {
        // Close in reverse order of acquisition; each may be null if an
        // earlier step threw before it was assigned.
        if (reader != null) reader.Close();
        if (responseStream != null) responseStream.Close();
        if (webResponse != null) webResponse.Close();
    }
    return result;
}
请问是否是对方做了什么限制的原因还是两个网站有什么不同?
------解决方案--------------------
本机测试过下面的代码可以正常获取
public static string UserAgent = @"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11";
public static string DownloadHtmlPage(string pageUrl, Encoding encoding, string requestMethod, int timeOut)
{
string value = string.Empty;
HttpWebResponse response = null;
Stream data = null;
StreamReader sr = null;
try
{
HttpWebRequest request = (HttpWebRequest)HttpWebRequest.Create(pageUrl);
request.Method = requestMethod;
request.ServicePoint.Expect100Continue = false;
//是否使用 Nagle 不使用 提高效率
request.ServicePoint.UseNagleAlgorithm = false;
//最大连接数
request.ServicePoint.ConnectionLimit = 65500;
//数据是否缓冲 false 提高效率
request.AllowWriteStreamBuffering = false;
request.UserAgent = UserAgent;
request.KeepAlive = true;
request.AutomaticDecompression = DecompressionMethods.GZip | DecompressionMethods.Deflate;
request.Headers.Add("Accept-Charset", "GBK,utf-8;q=0.7,*;q=0.3");
request.Headers.Add("Accept-Encoding:gzip,deflate,sdch");
request.Headers.Add("Accept-Language", "zh-CN,zh;q=0.8");
request.Headers.Add("Cache-Control", "max-age=0");