Web Crawler in C

http://bbs.bccn.net/thread-504909-1-1.html
https://cloud.tencent.com/developer/information/c%E8%AF%AD%E8%A8%80%E7%BC%96%E5%86%99%E7%88%AC%E8%99%AB
Based on the reference HTTP crawler code: just send GET + resource + HTTP/1.1 + Host, and the message you receive back is the page content.
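Written out byte for byte, the request described above looks like this (www.example.com is just a placeholder host; every header line ends with \r\n, and the empty line terminates the header block):

GET / HTTP/1.1
Host: www.example.com
Connection: Close
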
Nowadays many sites have switched to HTTPS one after another. I tried the HTTP crawler code against an HTTPS site anyway, but all I got back was: Your request has bad syntax or is inherently impossible to satisfy.
I captured the packets and took a look:
1. www.zhihu.com
After the three-way handshake (which I assume is the handshake that connect() performs for us?), the first packet the client sends is still GET / HTTP/1.1 plus some other content. That other content includes: Host, Connection, Accept, Upgrade-Insecure-Requests, User-Agent, Referer, Accept-Encoding, Accept-Language, Cookie, udid, _zap, d_c0, l_cap_id (the capture is of a visit to the Zhihu home page). The server replies with HTTP/1.1 302 Found plus some fields, cookies among them.
The client and server then exchange two more TCP packets (are those also keep-alive or acknowledgement packets that TCP/IP implements for us?), and after that come Client Hello and Server Hello.
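For illustration (these headers are reconstructed from memory, not copied from the capture), that 302 looks roughly like:

HTTP/1.1 302 Found
Location: https://www.zhihu.com/
Set-Cookie: ...

In other words, the plain-HTTP request on port 80 is accepted, but the server only uses it to redirect the client to the HTTPS address; the Client Hello / Server Hello that follows is presumably the TLS handshake on the new connection to port 443.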

2. www.taobao.com
After the three-way handshake it goes straight to Client Hello and Server Hello.

Questions:
1. Does accessing an HTTPS site require a GET first at all? And if it does, how are parameters like Cookie, udid, _zap, d_c0, and l_cap_id in the Zhihu request determined? (Because when I just send GET + resource + HTTP/1.1 + Host, the reply is: Your request has bad syntax or is inherently impossible to satisfy.)
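One note on where the GET fits: the Client Hello / Server Hello in both captures is the TLS handshake, and with HTTPS the GET is sent only after that handshake completes, inside the encrypted channel; a plaintext GET + resource + HTTP/1.1 + Host only works on port 80. Below is a minimal sketch of an HTTPS GET using OpenSSL 1.1+ (the library choice, the helper name HttpsGet, and the error handling are my assumptions, not part of the reference code; it reuses the includes and Winsock setup of the crawler code below, and the socket is assumed to be already connected to port 443):

// Sketch: HTTPS GET over an already-connected TCP socket, using OpenSSL
// (assumed to be available; HttpsGet is a hypothetical helper).
#include <openssl/ssl.h>
#include <openssl/err.h>

bool HttpsGet( SOCKET sock, const string & host, const string & resource )
{
    SSL_CTX * ctx = SSL_CTX_new( TLS_client_method() ); // OpenSSL 1.1+ API
    if( ctx == NULL ) return false;
    SSL * ssl = SSL_new( ctx );
    SSL_set_fd( ssl, (int)sock );                   // wrap the TCP socket (connected to port 443)
    SSL_set_tlsext_host_name( ssl, host.c_str() );  // SNI; many HTTPS sites require it

    if( SSL_connect( ssl ) != 1 ){                  // performs Client Hello / Server Hello etc.
        SSL_free( ssl ); SSL_CTX_free( ctx );
        return false;
    }

    string request = "GET " + resource + " HTTP/1.1\r\nHost:" + host + "\r\nConnection:Close\r\n\r\n";
    SSL_write( ssl, request.c_str(), (int)request.size() ); // the GET travels inside the encrypted channel

    char buf[4096];
    int n;
    while( ( n = SSL_read( ssl, buf, sizeof(buf) - 1 ) ) > 0 ){
        buf[n] = '\0';
        cout << buf;                                // decrypted response headers + body
    }

    SSL_shutdown( ssl );
    SSL_free( ssl );
    SSL_CTX_free( ctx );
    return true;
}

As for Cookie, udid, _zap, d_c0, and l_cap_id: those are most likely cookie values the browser had stored from earlier Set-Cookie responses, so a first request from a fresh crawler can normally omit them.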


Below is the reference crawler code:
//#include <Windows.h>
#include <string>
#include <cstring>    // strstr, strcpy, strlen
#include <cstdio>     // sscanf
#include <cstdlib>    // malloc, realloc
#include <iostream>
#include <fstream>
#include <vector>
#include "winsock2.h"
#include <time.h>
#include <queue>
#include <unordered_set> // modern replacement for the deprecated <hash_set>

#pragma comment(lib, "ws2_32.lib") 
using namespace std;

#define DEFAULT_PAGE_BUF_SIZE 1048576

queue<string> hrefUrl;
unordered_set<string> visitedUrl;
unordered_set<string> visitedImg;
int depth=0;
int g_ImgCnt=1;

// Parse a URL into a host name and a resource path
bool ParseURL( const string & url, string & host, string & resource )
{
    if ( strlen(url.c_str()) > 2000 ) {
        return false;
    }

    const char * pos = strstr( url.c_str(), "http://" );
    if( pos==NULL ) pos = url.c_str();
    else pos += strlen("http://");
    char pHost[100];
    char pResource[2000];
    if( strstr( pos, "/")==0 )
    {
        pResource[0]='/';pResource[1]=0;
        strcpy(pHost,pos);
    }
    else        
        sscanf( pos, "%99[^/]%1999s", pHost, pResource ); // %[^a] matches any characters other than 'a' and stops at 'a';
                                                          // the width limits keep pHost/pResource from overflowing
    // pHost gets the text between "http://" and the first '/'; everything from that '/' onward goes into pResource
    host = pHost;
    resource = pResource;
    return true;
}
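// Illustrative example (not from the original post): after
//   ParseURL( "http://www.zhihu.com/people/abc", host, resource );
// host == "www.zhihu.com" and resource == "/people/abc";
// for "http://www.zhihu.com" alone, resource defaults to "/".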

// Issue a GET request and read back the response
bool GetHttpResponse( const string & url, char * &response, int &bytesRead )
{
    string host, resource;
    if(!ParseURL( url, host, resource )){
        cout << "Can not parse the url"<<endl;
        return false;
    }
    
    // Resolve the host name (WSAStartup must already have been called, e.g. in main, which is not shown here)
    struct hostent * hp = gethostbyname( host.c_str() );
    if( hp==NULL ){
        cout<< "Can not find host address"<<endl;
        return false;
    }

    SOCKET sock = socket( AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if( sock == INVALID_SOCKET ){ // Winsock reports failure as INVALID_SOCKET, not -1/-2
        cout << "Can not create sock."<<endl;
        return false;
    }

    // Fill in the server address
    SOCKADDR_IN sa;
    sa.sin_family = AF_INET;
    sa.sin_port = htons( 80 );
    //char addr[5];
    //memcpy( addr, hp->h_addr, 4 );
    //sa.sin_addr.s_addr = inet_addr(hp->h_addr);
    memcpy( &sa.sin_addr, hp->h_addr, 4 );

    // Connect to the server
    if( 0 != connect( sock, (SOCKADDR*)&sa, sizeof(sa) ) ){
        cout << "Can not connect: " << url << endl;
        closesocket(sock);
        return false;
    }

    // Build the request; header lines end with \r\n and the empty line (\r\n\r\n) terminates the headers
    string request = "GET " + resource + " HTTP/1.1\r\nHost:" + host + "\r\nConnection:Close\r\n\r\n"; // string concatenation

    // Send the request
    if( SOCKET_ERROR == send( sock, request.c_str(), request.size(), 0 ) ){ // request.size() == strlen(request.c_str())
        cout << "send error" << endl;
        closesocket( sock );
        return false;
    }

    // Receive the response
    int m_nContentLength = DEFAULT_PAGE_BUF_SIZE;
    char *pageBuf = (char *)malloc(m_nContentLength);
    memset(pageBuf, 0, m_nContentLength);

    bytesRead = 0;
    int ret = 1;
    cout <<"Read: ";
    while(ret > 0)
    {
        ret = recv(sock, pageBuf + bytesRead, m_nContentLength - bytesRead, 0);        
        if(ret > 0)
        {
            bytesRead += ret;
        }

        if( m_nContentLength - bytesRead<100)
        {
            cout << "
Realloc memorry"<<endl;
            m_nContentLength *=2;
            pageBuf = (char*)realloc( pageBuf, m_nContentLength);       //重新分配内存        }
        cout << ret <<"";
    }
    cout <<endl;

    pageBuf[bytesRead] = '\0';