Larbin Crawler Analysis (Part 2): sequencer()
Function analyzed: void sequencer()
// location: larbin-2.6.3/src/fetch/sequencer.cc
void sequencer()
{
bool testPriority = true;
if (space == 0) // uint space = 0, global counter defined in sequencer.cc
{
space = global::inter->putAll();
}
int still = space;
if (still > maxPerCall) //#define maxPerCall 100
still = maxPerCall;
while (still)
{
if (canGetUrl(&testPriority))
{
--space;
--still;
}
else
{
still = 0;
}
}
}
Containing files
larbin-2.6.3/src/fetch/sequencer.h, larbin-2.6.3/src/fetch/sequencer.cc
// Larbin
// Sebastien Ailleret
// 15-11-99 -> 15-11-99
#ifndef SEQUENCER_H
#define SEQUENCER_H
/** only for debugging, handle with care */
extern uint space;
/** Call the sequencer */
void sequencer ();
#endif
// Larbin
// Sebastien Ailleret
// 15-11-99 -> 04-01-02
#include <iostream.h>
#include "options.h"
#include "global.h"
#include "types.h"
#include "utils/url.h"
#include "utils/debug.h"
#include "fetch/site.h"
static bool canGetUrl (bool *testPriority);
uint space = 0;
#define maxPerCall 100
/** start the sequencer*/
//dispatch urls, by priority, to the sites waiting to be crawled
void sequencer()
{
bool testPriority = true;
if (space == 0)
{
space = global::inter->putAll();
}
int still = space;
if (still > maxPerCall) //#define maxPerCall 100
still = maxPerCall;
while (still)
{
if (canGetUrl(&testPriority))
{
space--;
still--;
}
else
{
still = 0;
}
}
}
/* Get the next url
* here is defined how priorities are handled
 * Pull a url, by priority, from one of the fifos
 * (URLsDisk, URLsDiskWait, URLsPriority, URLsPriorityWait)
 * and store it into a NamedSite chosen by the url's host hash
*/
static bool canGetUrl (bool *testPriority)
{
url *u;
if (global::readPriorityWait) // initialized to 0 in global.cc
{
global::readPriorityWait--;
u = global::URLsPriorityWait->get();
global::namedSiteList[u->hostHashCode()].putPriorityUrlWait(u);
return true;
}
else if (*testPriority && (u=global::URLsPriority->tryGet()) != NULL)
{
// We've got one url (priority)
global::namedSiteList[u->hostHashCode()].putPriorityUrl(u);
return true;
}
else
{
*testPriority = false;
// Try to get an ordinary url
if (global::readWait)
{
global::readWait--;
u = global::URLsDiskWait->get();
global::namedSiteList[u->hostHashCode()].putUrlWait(u);
return true;
}
else
{
u = global::URLsDisk->tryGet();
if (u != NULL)
{
global::namedSiteList[u->hostHashCode()].putUrl(u);
return true;
}
else
{
return false;
}
}
}
}
I. About space = global::inter->putAll();
1. inter is defined in global.cc (location: /larbin-2.6.3/src/global.cc) as
inter = new Interval(ramUrls); //#define ramUrls 100000 (location: larbin-2.6.3/src/types.h)
Note: distinguish inter = new Interval(ramUrls); from inter = new Interval[ramUrls];. In the former, the parentheses hold an argument passed to the constructor; in the latter, the brackets hold the number of array elements to allocate, as shown below.
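To see the difference concretely, here is a minimal standalone sketch (a toy class, not larbin code):

#include <cstdio>

class Box {
public:
    explicit Box (unsigned n = 0) : size(n) {}
    unsigned size;
};

int main () {
    Box *one  = new Box(100000);  // one object; 100000 is passed to the constructor
    Box *many = new Box[100000];  // an array of 100000 default-constructed objects
    printf("%u %u\n", one->size, many[0].size); // prints "100000 0"
    delete one;                   // single object: plain delete
    delete [] many;               // array: delete [] is required
    return 0;
}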
2. The Interval class (location: /larbin-2.6.3/src/fetch/site.h)
/** This class is intended to make sure the sum of the
* sizes of the fifo included in the different sites
* are not too big
*/
class Interval
{
public:
Interval (uint sizes) : size(sizes), pos(0) {}
~Interval () {}
/** How many urls can we put. Answer 0: if no urls can be put */
inline uint putAll ()
{
int res = size - pos;
pos = size;
return res;
}
/** Warn an url has been retrieved */
inline void getOne ()
{
--pos;
}
/** only for debugging, handle with care */
inline uint getPos ()
{
return pos;
}
private:
/** Size of the interval */
uint size;
/** Position in the interval */
uint pos;
};
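To make the accounting concrete, here is a standalone walk-through of putAll()/getOne() (a sketch assuming uint is unsigned int, with the class copied from above):

typedef unsigned int uint;

class Interval {
public:
    Interval (uint sizes) : size(sizes), pos(0) {}
    inline uint putAll () { int res = size - pos; pos = size; return res; }
    inline void getOne () { --pos; }
private:
    uint size;
    uint pos;
};

int main () {
    Interval inter(100000);      // global::inter is built with ramUrls = 100000
    uint space = inter.putAll(); // 100000: every slot is granted, pos == size
    inter.getOne();              // one url has left the in-memory structures
    space = inter.putAll();      // 1: exactly the freed slot is granted again
    return (int) space;
}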
Note: the member functions are defined inside the class, hence inline. A few points about inline functions:
- Inlining avoids function-call overhead. Declaring a function inline (usually) expands it "in line" at every call point, eliminating the cost of a call (saving registers before the call and restoring them on return). The inline keyword is only a suggestion; the compiler may ignore it. Inlining generally suits small, frequently called functions of a few lines; most compilers will not inline recursive functions.
- Put inline functions in header files, so the compiler can expand them at every call point (the definition must be visible to the compiler and identical in every translation unit); see the sketch after this list.
- Member functions defined inside the class body are implicitly treated as inline.
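A minimal sketch of the header-placement rule (hypothetical file names):

// square.h (hypothetical header)
#ifndef SQUARE_H
#define SQUARE_H
// defined in the header so every translation unit sees the same body
// and the compiler can expand calls in place
inline int square (int x) { return x * x; }
#endif

// main.cc
#include "square.h"
int main () { return square(7); } // the call may compile down to 7 * 7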
II. About canGetUrl(&testPriority)
Function definition (location: larbin-2.6.3/src/fetch/sequencer.cc)
/* Get the next url
* here is defined how priorities are handled
Pull a url, by priority, from one of the fifos
(URLsDisk, URLsDiskWait, URLsPriority, URLsPriorityWait)
and store it into a NamedSite chosen by the url's host hash
at "global.cc"
// FIFOs
URLsDisk = new PersistentFifo(reload, fifoFile);
URLsDiskWait = new PersistentFifo(reload, fifoFileWait);
URLsPriority = new SyncFifo<url>;
URLsPriorityWait = new SyncFifo<url>;
*/
static bool canGetUrl (bool *testPriority)
{
url *u;
if (global::readPriorityWait != 0) // declared and defined in global.cc: uint global::readPriorityWait = 0;
{
global::readPriorityWait--;
u = global::URLsPriorityWait->get();
global::namedSiteList[u->hostHashCode()].putPriorityUrlWait(u);
return true;
}
else if (*testPriority && (u=global::URLsPriority->tryGet()) != NULL)
{
// We've got one url (priority)
global::namedSiteList[u->hostHashCode()].putPriorityUrl(u);
return true;
}
else
{
*testPriority = false;
// Try to get an ordinary url
if (global::readWait)
{
global::readWait--;
u = global::URLsDiskWait->get();
global::namedSiteList[u->hostHashCode()].putUrlWait(u);
return true;
}
else
{
u = global::URLsDisk->tryGet();
if (u != NULL)
{
global::namedSiteList[u->hostHashCode()].putUrl(u);
return true;
}
else
{
return false;
}
}
}
}
1. The disk and priority fifos come in pairs because each site effectively keeps a small queue of its urls inside namedSiteList, and that queue is bounded: once a site's quota is full, further urls for that site cannot be inserted, yet they must not be dropped either, so they are parked in the corresponding wait fifo. Larbin alternates between periods of taking urls from the disk fifo and periods of taking them from diskWait; a sketch of this spillover policy follows. disk and priority differ only in priority. namedSiteList also serves as a DNS cache.
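A simplified sketch of that policy (hypothetical types; the real logic is in NamedSite::putGenericUrl, quoted below):

#include <cstddef>
#include <queue>

struct Url { };

struct SiteQueues {
    std::queue<Url*> site;   // the small per-site queue inside namedSiteList
    std::queue<Url*> wait;   // stands in for URLsDiskWait / URLsPriorityWait
    std::size_t maxPerSite;  // plays the role of maxUrlsBySite

    void put (Url *u) {
        if (site.size() < maxPerSite)
            site.push(u);    // quota not reached: accept the url for this site
        else
            wait.push(u);    // quota full: park the url, do not drop it
    }
};

int main () {
    SiteQueues q;
    q.maxPerSite = 2;
    Url a, b, c;
    q.put(&a); q.put(&b);    // fill the site's quota
    q.put(&c);               // overflows into the wait queue
    return (int) q.wait.size(); // 1
}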

2. global::readPriorityWait gets its value in the cron() function in main.cc:
// see if we should read again urls in fifowait
if ((global::now % 300) == 0) {
global::readPriorityWait = global::URLsPriorityWait->getLength();
global::readWait = global::URLsDiskWait->getLength();
}
if ((global::now % 300) == 150) {
global::readPriorityWait = 0;
global::readWait = 0;
}
Here (global::now % 300) decides whether this pass reads urls from the wait fifos or from the regular ones. Both (global::now % 300) == 0 and == 150 occur once per 300 ticks of global::now, so the crawler alternates between the two sources roughly every 150 ticks. readPriorityWait is the length of URLsPriorityWait (i.e. its number of urls); readWait is the number of urls in URLsDiskWait.
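A small simulation of that cadence (assuming cron() ticks once per second; the wait-fifo length 42 is made up):

#include <stdio.h>

int main () {
    int readWait = 0;
    for (int now = 0; now < 900; now++) {
        if (now % 300 == 0)   readWait = 42; // stand-in for URLsDiskWait->getLength()
        if (now % 300 == 150) readWait = 0;
        if (now % 150 == 0)
            printf("t=%03d readWait=%d\n", now, readWait);
    }
    return 0;
}

This prints readWait toggling between 42 and 0 every 150 ticks, which is the alternation described above.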
3. In canGetUrl, each url is handed to its target site. putPriorityUrlWait, putPriorityUrl, putUrlWait and putUrl are defined in site.h as follows:
/** Put an url in the fifo
* If there are too much, put it back in UrlsInternal
* Never fill totally the fifo => call at least with 1 */
void putGenericUrl(url *u, int limit, bool prio);
inline void putUrl(url *u) {
putGenericUrl(u, 15, false);
}
inline void putUrlWait(url *u) {
putGenericUrl(u, 10, false);
}
inline void putPriorityUrl(url *u) {
putGenericUrl(u, 5, true);
}
inline void putPriorityUrlWait(url *u) {
putGenericUrl(u, 1, true);
}
Notice that every wrapper calls putGenericUrl, which is defined as follows:
/* Put an url in the fifo if their are not too many */
void NamedSite::putGenericUrl(url *u, int limit, bool prio)
{
if (nburls > maxUrlsBySite - limit)
{
// Already enough Urls in memory for this Site
// first check if it can already be forgotten
if (!strcmp(name, u->getHost()))
{
if (dnsState == errorDns)
{
nburls++;
forgetUrl(u, noDNS);
return;
}
if (dnsState == noConnDns)
{
nburls++;
forgetUrl(u, noConnection);
return;
}
if (u->getPort() == port && dnsState == doneDns && !testRobots(u->getFile()))
{
nburls++;
forgetUrl(u, forbiddenRobots);
return;
}
}
// else put it back in URLsDisk
refUrl();
global::inter->getOne();
if (prio)
{
global::URLsPriorityWait->put(u);
}
else
{
global::URLsDiskWait->put(u);
}
}
If there are already enough urls in memory for this site, the code inside this if runs. strcmp(name, u->getHost()) checks whether this host has already been through the DNS check: for a given site, the DNS decision is made only once, and later urls are handled according to the stored result. dnsState may be errorDns (DNS failed, noDNS), noConnDns (no connection), or the url may be forbidden by robots.txt; in all other cases the url is put back into a wait fifo (URLsPriorityWait or URLsDiskWait).
else {
nburls++;
if (dnsState == waitDns || strcmp(name, u->getHost()) || port
!= u->getPort() || global::now > dnsTimeout) {
// dns not done or other site
putInFifo(u);
addNamedUrl();
// Put Site in fifo if not yet in
if (!isInFifo) {
isInFifo = true;
global::dnsSites->put(this);
}
} else
switch (dnsState) {
case doneDns:
transfer(u);
break;
case errorDns:
forgetUrl(u, noDNS);
break;
default: // noConnDns
forgetUrl(u, noConnection);
}
}
If DNS still has to be resolved for this site (dnsState is waitDns, the host or port differs, or the DNS entry has timed out), the site is put into dnsSites, which fetchDns will process. Otherwise, if the url still fits in memory and the state is doneDns (resolution succeeded), transfer is called:
void NamedSite::transfer(url *u) {
if (testRobots(u->getFile())) {
if (global::proxyAddr == NULL) {
memcpy(&u->addr, &addr, sizeof(struct in_addr));
}
global::IPSiteList[ipHash].putUrl(u);
} else {
forgetUrl(u, forbiddenRobots);
}
}
Here the url is put into the IPSiteList slot selected by ipHash.
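A minimal sketch of the bucketing idea behind namedSiteList/IPSiteList (the multiplier 37 matches siteHashCode() in url.cc; the bucket count 20000 is made up here):

#include <stdio.h>

static unsigned siteHash (const char *host, unsigned buckets) {
    unsigned h = 0;
    for (unsigned i = 0; host[i] != 0; i++)
        h = 37 * h + (unsigned char) host[i]; // same scheme as siteHashCode()
    return h % buckets;
}

int main () {
    // every url of one host lands in the same bucket, so it shares
    // one per-site queue and one cached DNS result
    printf("%u\n", siteHash("www.example.com", 20000));
    printf("%u\n", siteHash("www.example.com", 20000)); // identical
    return 0;
}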
Appendix: class definitions
The url class (larbin-2.6.3/src/utils/url.h, larbin-2.6.3/src/utils/url.cc)
// Larbin
// Sebastien Ailleret
// 15-11-99 -> 14-03-02
/* This class describes an URL */
#ifndef URL_H
#define URL_H
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <stdlib.h>
#include "types.h"
bool fileNormalize (char *file);
class url {
private:
char *host;
char *file;
uint16_t port; // the order of variables is important for physical size
int8_t depth;
/* parse the url */
void parse (char *s);
/** parse a file with base */
void parseWithBase (char *u, url *base);
/* normalize file name */
bool normalize (char *file);
/* Does this url starts with a protocol name */
bool isProtocol (char *s);
/* constructor used by giveBase */
url (char *host, uint port, char *file);
public:
/* Constructor : Parses an url (u is deleted) */
url (char *u, int8_t depth, url *base);
/* constructor used by input */
url (char *line, int8_t depth);
/* Constructor : read the url from a file (cf serialize) */
url (char *line);
/* Destructor */
~url ();
/* inet addr (once calculated) */
struct in_addr addr;
/* Is it a valid url ? */
bool isValid ();
/* print an URL */
void print ();
/* return the host */
inline char *getHost () { return host; }
/* return the port */
inline uint getPort () { return port; }
/* return the file */
inline char *getFile () { return file; }
/** Depth in the Site */
inline int8_t getDepth () { return depth; }
/* Set depth to max if we are at an entry point in the site
* try to find the ip addr
* answer false if forbidden by robots.txt, true otherwise */
bool initOK (url *from);
/** return the base of the url
* give means that you have to delete the string yourself
*/
url *giveBase ();
/** return a char * representation of the url
* give means that you have to delete the string yourself
*/
char *giveUrl ();
/** write the url in a buffer
* buf must be at least of size maxUrlSize
* returns the size of what has been written (not including ' ')
*/
int writeUrl (char *buf);
/* serialize the url for the Persistent Fifo */
char *serialize ();
/* very thread unsafe serialisation in a static buffer */
char *getUrl();
/* return a hashcode for the host of this url */
uint hostHashCode ();
/* return a hashcode for this url */
uint hashCode ();
#ifdef URL_TAGS
/* tag associated to this url */
uint tag;
#endif // URL_TAGS
#ifdef COOKIES
/* cookies associated with this page */
char *cookie;
void addCookie(char *header);
#else // COOKIES
inline void addCookie(char *header) {}
#endif // COOKIES
};
#endif // URL_H
// Larbin
// Sebastien Ailleret
// 15-11-99 -> 16-03-02
/* This class describes an URL */
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "options.h"
#include "types.h"
#include "global.h"
#include "utils/url.h"
#include "utils/text.h"
#include "utils/connexion.h"
#include "utils/debug.h"
#ifdef COOKIES
#define initCookie() cookie=NULL
#else // COOKIES
#define initCookie() ((void) 0)
#endif // COOKIES
/* small functions used later */
static uint siteHashCode (char *host) {
uint h=0;
uint i=0;
while (host[i] != 0) {
h = 37*h + host[i];
i++;
}
return h % namedSiteListSize;
}
/* return the int with correspond to a char
* -1 if not an hexa char */
static int int_of_hexa (char c) {
if (c >= '0' && c <= '9')
return (c - '0');
else if (c >= 'a' && c <= 'f')
return (c - 'a' + 10);
else if (c >= 'A' && c <= 'F')
return (c - 'A' + 10);
else
return -1;
}
/* normalize a file name : also called by robots.txt parser
* return true if it is ok, false otherwise (cgi-bin)
*/
bool fileNormalize (char *file) {
int i=0;
while (file[i] != 0 && file[i] != '#') {
if (file[i] == '/') {
if (file[i+1] == '.' && file[i+2] == '/') {
// suppress /./
int j=i+3;
while (file[j] != 0) {
file[j-2] = file[j];
j++;
}
file[j-2] = 0;
} else if (file[i+1] == '/') {
// replace // by /
int j=i+2;
while (file[j] != 0) {
file[j-1] = file[j];
j++;
}
file[j-1] = 0;
} else if (file[i+1] == '.' && file[i+2] == '.' && file[i+3] == '/') {
// suppress /../
if (i == 0) {
// the file name starts with /../ : error
return false;
} else {
int j = i+4, dec;
i--;
while (file[i] != '/') { i--; }
dec = i+1-j; // dec < 0
while (file[j] != 0) {
file[j+dec] = file[j];
j++;
}
file[j+dec] = 0;
}
} else if (file[i+1] == '.' && file[i+2] == 0) {
// suppress /.
file[i+1] = 0;
return true;
} else if (file[i+1] == '.' && file[i+2] == '.' && file[i+3] == 0) {
// suppress /..
if (i == 0) {
// the file name starts with /.. : error
return false;
} else {
i--;
while (file[i] != '/') {
i--;
}
file[i+1] = 0;
return true;
}
} else { // nothing special, go forward
i++;
}
} else if (file[i] == '%') {
int v1 = int_of_hexa(file[i+1]);
int v2 = int_of_hexa(file[i+2]);
if (v1 < 0 || v2 < 0) return false;
char c = 16 * v1 + v2;
if (isgraph(c)) {
file[i] = c;
int j = i+3;
while (file[j] != 0) {
file[j-2] = file[j];
j++;
}
file[j-2] = 0;
i++;
} else if (c == ' ' || c == '/') { // keep it with the % notation
i += 3;
} else { // bad url
return false;
}
} else { // nothing special, go forward
i++;
}
}
file[i] = 0;
return true;
}
/**************************************/
/* definition of methods of class url */
/**************************************/
/* Constructor : Parses an url */
url::url (char *u, int8_t depth, url *base) {
newUrl();
this->depth = depth;
host = NULL;
port = 80;
file = NULL;
initCookie();
#ifdef URL_TAGS
tag = 0;
#endif // URL_TAGS
if (startWith("http://", u)) {
// absolute url
parse (u + 7);
// normalize file name
if (file != NULL && !normalize(file)) {
delete [] file;
file = NULL;
delete [] host;
host = NULL;
}
} else if (base != NULL) {
if (startWith("http:", u)) {
parseWithBase(u+5, base);
} else if (isProtocol(u)) {
// Unknown protocol (mailto, ftp, news, file, gopher...)
} else {
parseWithBase(u, base);
}
}
}
/* constructor used by input */
url::url (char *line, int8_t depth) {
newUrl();
this->depth = depth;
host = NULL;
port = 80;
file = NULL;
initCookie();
int i=0;
#ifdef URL_TAGS
tag = 0;
while (line[i] >= '0' && line[i] <= '9') {
tag = 10*tag + line[i] - '0';
i++;
}
i++;
#endif // URL_TAGS
if (startWith("http://", line+i)) {
parse(line+i+7);
// normalize file name
if (file != NULL && !normalize(file)) {
delete [] file;
file = NULL;
delete [] host;
host = NULL;
}
}
}
/* Constructor : read the url from a file (cf serialize)
*/
url::url (char *line) {
newUrl();
int i=0;
// Read depth
depth = 0;
while (line[i] >= '0' && line[i] <= '9') {
depth = 10*depth + line[i] - '0';
i++;
}
#ifdef URL_TAGS
// read tag
tag = 0; i++;
while (line[i] >= '0' && line[i] <= '9') {
tag = 10*tag + line[i] - '0';
i++;
}
#endif // URL_TAGS
int deb = ++i;
// Read host
while (line[i] != ':') {
i++;
}
line[i] = 0;
host = newString(line+deb);
i++;
// Read port
port = 0;
while (line[i] >= '0' && line[i] <= '9') {
port = 10*port + line[i] - '0';
i++;
}
#ifndef COOKIES
// Read file name
file = newString(line+i);
#else // COOKIES
char *cpos = strchr(line+i, ' ');
if (cpos == NULL) {
cookie = NULL;
} else {
*cpos = 0;
// read cookies
cookie = new char[maxCookieSize];
strcpy(cookie, cpos+1);
}
// Read file name
file = newString(line+i);
#endif // COOKIES
}
/* constructor used by giveBase */
url::url (char *host, uint port, char *file) {
newUrl();
initCookie();
this->host = host;
this->port = port;
this->file = file;
}
/* Destructor */
url::~url () {
delUrl();
delete [] host;
delete [] file;
#ifdef COOKIES
delete [] cookie;
#endif // COOKIES
}
/* Is it a valid url ? */
bool url::isValid () {
if (host == NULL) return false;
int lh = strlen(host);
return file!=NULL && lh < maxSiteSize
&& lh + strlen(file) + 18 < maxUrlSize;
}
/* print an URL */
void url::print () {
printf("http://%s:%u%s
", host, port, file);
}
/* Set depth to max if necessary
* try to find the ip addr
* answer false if forbidden by robots.txt, true otherwise */
bool url::initOK (url *from) {
#if defined(DEPTHBYSITE) || defined(COOKIES)
if (strcmp(from->getHost(), host)) { // different site
#ifdef DEPTHBYSITE
depth = global::depthInSite;
#endif // DEPTHBYSITE
} else { // same site
#ifdef COOKIES
if (from->cookie != NULL) {
cookie = new char[maxCookieSize];
strcpy(cookie, from->cookie);
}
#endif // COOKIES
}
#endif // defined(DEPTHBYSITE) || defined(COOKIES)
if (depth < 0) {
errno = tooDeep;
return false;
}
NamedSite *ns = global::namedSiteList + (hostHashCode());
if (!strcmp(ns->name, host) && ns->port == port) {
switch (ns->dnsState) {
case errorDns:
errno = fastNoDns;
return false;
case noConnDns:
errno = fastNoConn;
return false;
case doneDns:
if (!ns->testRobots(file)) {
errno = fastRobots;
return false;
}
}
}
return true;
}
/* return the base of the url */
url *url::giveBase () {
int i = strlen(file);
assert (file[0] == '/');
while (file[i] != '/') {
i--;
}
char *newFile = new char[i+2];
memcpy(newFile, file, i+1);
newFile[i+1] = 0;
return new url(newString(host), port, newFile);
}
/** return a char * representation of the url
* give means that you have to delete the string yourself
*/
char *url::giveUrl () {
char *tmp;
int i = strlen(file);
int j = strlen(host);
tmp = new char[18+i+j]; // 7 + j + 1 + 9 + i + 1
// http://(host):(port)(file)
strcpy(tmp, "http://");
strcpy (tmp+7, host);
j += 7;
if (port != 80) {
j += sprintf(tmp + j, ":%u", port);
}
// Copy file name
while (i >= 0) {
tmp [j+i] = file[i];
i--;
}
return tmp;
}
/** write the url in a buffer
* buf must be at least of size maxUrlSize
* returns the size of what has been written (not including ' ')
*/
int url::writeUrl (char *buf) {
if (port == 80)
return sprintf(buf, "http://%s%s", host, file);
else
return sprintf(buf, "http://%s:%u%s", host, port, file);
}
/* serialize the url for the Persistent Fifo */
char *url::serialize () {
// this buffer is protected by the lock of PersFifo
static char statstr[maxUrlSize+40+maxCookieSize];
int pos = sprintf(statstr, "%u ", depth);
#ifdef URL_TAGS
pos += sprintf(statstr+pos, "%u ", tag);
#endif // URL_TAGS
pos += sprintf(statstr+pos, "%s:%u%s", host, port, file);
#ifdef COOKIES
if (cookie != NULL) {
pos += sprintf(statstr+pos, " %s", cookie);
}
#endif // COOKIES
statstr[pos] = '\n';
statstr[pos+1] = 0;
return statstr;
}
/* very thread unsafe serialisation in a static buffer */
char *url::getUrl() {
static char statstr[maxUrlSize+40];
sprintf(statstr, "http://%s:%u%s", host, port, file);
return statstr;
}
/* return a hashcode for the host of this url */
uint url::hostHashCode () {
return siteHashCode (host);
}
/* return a hashcode for this url */
uint url::hashCode () {
unsigned int h=port;
unsigned int i=0;
while (host[i] != 0) {
h = 31*h + host[i];
i++;
}
i=0;
while (file[i] != 0) {
h = 31*h + file[i];
i++;
}
return h % hashSize;
}
/* parses a url :
* at the end, arg must have its initial state,
* http:// has allready been suppressed
*/
void url::parse (char *arg) {
int deb = 0, fin = deb;
// Find the end of host name (put it into lowerCase)
while (arg[fin] != '/' && arg[fin] != ':' && arg[fin] != 0) {
fin++;
}
if (fin == 0) return;
// get host name
host = new char[fin+1];
for (int i=0; i<fin; i++) {
host[i] = lowerCase(arg[i]);
}
host[fin] = 0;
// get port number
if (arg[fin] == ':') {
port = 0;
fin++;
while (arg[fin] >= '0' && arg[fin] <= '9') {
port = port*10 + arg[fin]-'0';
fin++;
}
}
// get file name
if (arg[fin] != '/') {
// www.inria.fr => add the final /
file = newString("/");
} else {
file = newString(arg + fin);
}
}
/** parse a file with base
*/
void url::parseWithBase (char *u, url *base) {
// cat filebase and file
if (u[0] == '/') {
file = newString(u);
} else {
uint lenb = strlen(base->file);
char *tmp = new char[lenb + strlen(u) + 1];
memcpy(tmp, base->file, lenb);
strcpy(tmp + lenb, u);
file = tmp;
}
if (!normalize(file)) {
delete [] file;
file = NULL;
return;
}
host = newString(base->host);
port = base->port;
}
/** normalize file name
* return true if it is ok, false otherwise (cgi-bin)
*/
bool url::normalize (char *file) {
return fileNormalize(file);
}
/* Does this url starts with a protocol name */
bool url::isProtocol (char *s) {
uint i = 0;
while (isalnum(s[i])) {
i++;
}
return s[i] == ':';
}
#ifdef COOKIES
#define addToCookie(s) len = strlen(cookie); \
    strncpy(cookie+len, s, maxCookieSize-len); \
    cookie[maxCookieSize-1] = 0;
/* see if a header contain a new cookie */
void url::addCookie(char *header) {
if (startWithIgnoreCase("set-cookie: ", header)) {
char *pos = strchr(header+12, ';');
if (pos != NULL) {
int len;
if (cookie == NULL) {
cookie = new char[maxCookieSize];
cookie[0] = 0;
} else {
addToCookie("; ");
}
*pos = 0;
addToCookie(header+12);
*pos = ';';
}
}
}
#endif // COOKIES