Several Ways to Prevent a Website from Being Crawled
Reposted from: http://laoxu.blog.51cto.com/4120547/1302013
One approach is to block spider IPs at the firewall, based on what shows up in the nginx access log:

#!/bin/bash
LOGFILE=/var/log/nginx/access.log
PREFIX=/etc/spiders
# Most spiders identify themselves with the keyword "spider" in the log,
# but Baidu must not be blocked, so filter it out
grep 'spider' $LOGFILE | grep -v 'Baidu' | awk '{print $1}' > $PREFIX/ip1.txt
# Block NetEase's Youdao
grep 'YoudaoBot' $LOGFILE | awk '{print $1}' >> $PREFIX/ip1.txt
# Block Yahoo
grep 'Yahoo!' $LOGFILE | awk '{print $1}' >> $PREFIX/ip1.txt
# Filter out trusted IPs
sort -n $PREFIX/ip1.txt | uniq | sort | grep -v '192.168.0.' | grep -v '127.0.0.1' > $PREFIX/ip2.txt
# Unblock any IP that sent no more than 30 packets within the past hour
/sbin/iptables -nvL | awk '$1 <= 30 {print $8}' > $PREFIX/ip3.txt
for ip in `cat $PREFIX/ip3.txt`; do /sbin/iptables -D INPUT -s $ip -j DROP; done
# Reset the iptables packet counters to zero
/sbin/iptables -Z
for ip in `cat $PREFIX/ip2.txt`; do /sbin/iptables -I INPUT -s $ip -j DROP; done
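The "past hour" logic only works if the packet counters are reset on the same schedule the script runs on, so it is meant to run periodically. A minimal sketch of a crontab entry, assuming the script above is saved as /etc/spiders/block_spiders.sh (an illustrative path, not from the original post):

# /etc/crontab: run the blocking script at the top of every hour
0 * * * * root /etc/spiders/block_spiders.sh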
A second approach is to ask well-behaved crawlers to stay away with a robots.txt at the site root:

User-agent: *
Disallow: /
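robots.txt can also target individual bots instead of everyone. A sketch in the same spirit as the script above, which deliberately leaves Baidu alone (an empty Disallow means "allow everything"):

User-agent: YoudaoBot
Disallow: /

User-agent: Baiduspider
Disallow: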
A third approach is to reject unwanted user agents directly in nginx. Edit the configuration:

# vim /usr/local/nginx/conf/nginx.conf
## Block http user agent - wget ##
if ($http_user_agent ~* (Wget)) {
    return 403;
}
## Block software download user agents ##
if ($http_user_agent ~* LWP::Simple|BBBike|wget) {
    return 403;
}
Reload nginx for the change to take effect:

# /usr/local/nginx/sbin/nginx -s reload
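A quick way to verify the rules is to request the site with a matching user agent; with the configuration above, nginx should answer 403 (the hostname below is a placeholder):

# -I sends a HEAD request, -A sets the User-Agent header
curl -I -A 'Wget/1.21' http://your.server.example/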
Several agents can be rejected in a single condition. Note the quotes, which nginx requires because one of the patterns contains a space:

if ($http_user_agent ~ "(agent1|agent2|Foo|Wget|Catall Spider|AcoiRobot)") {
    return 403;
}
### Case-sensitive http user agent deny ###
if ($http_user_agent ~ "(Catall Spider|AcoiRobot)") {
    return 403;
}
### Case-insensitive http user agent deny ###
if ($http_user_agent ~* (foo|bar)) {
    return 403;
}
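As the blocklist grows, a single map lookup is easier to maintain than stacked if blocks. A minimal sketch of the same blocking logic using nginx's map directive, assuming it sits at the http level of nginx.conf (the agent list is illustrative):

map $http_user_agent $block_ua {
    default            0;  # allow by default
    ~*wget             1;  # ~* is a case-insensitive regex match
    ~*(foo|bar)        1;
    "~Catall Spider"   1;  # ~ is case-sensitive; quoted because of the space
    ~AcoiRobot         1;
}

server {
    listen 80;
    # a non-empty, non-zero map result rejects the request
    if ($block_ua) {
        return 403;
    }
}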