1 /***************************************************************************
3 * Project ___| | | | _ \| |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2004, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at http://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 * $Id: transfer.c,v 1.212 2004/03/16 09:16:38 bagder Exp $
22 ***************************************************************************/
26 /* -- WIN32 approved -- */
32 #ifdef HAVE_SYS_TYPES_H
33 #include <sys/types.h>
39 #include "strtoofft.h"
42 #if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
46 #ifdef HAVE_SYS_SOCKET_H
47 #include <sys/socket.h>
49 #ifdef HAVE_NETINET_IN_H
50 #include <netinet/in.h>
57 #ifdef HAVE_ARPA_INET_H
58 #include <arpa/inet.h>
63 #ifdef HAVE_SYS_IOCTL_H
64 #include <sys/ioctl.h>
68 #ifdef HAVE_SYS_PARAM_H
69 #include <sys/param.h>
72 #ifdef HAVE_SYS_SELECT_H
73 #include <sys/select.h>
77 #error "We can't compile without select() support!"
80 #error "We can't compile without socket() support!"
86 #include <curl/curl.h>
87 #include <curl/types.h>
90 #include "content_encoding.h"
94 #include "speedcheck.h"
101 #include "http_digest.h"
102 #include "http_ntlm.h"
103 #include "http_negotiate.h"
106 #define _MPRINTF_REPLACE /* use our functions only */
107 #include <curl/mprintf.h>
109 /* The last #include file should be: */
111 #include "memdebug.h"
114 #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
122 /* We keep this static and global since this is read-only and NEVER
123    changed. It should just remain a blanked-out timeout value. */
/* A {0,0} timeval passed to select() makes it poll: it returns immediately
   instead of blocking, which is what the fd-probe paths below rely on. */
124 static struct timeval notimeout={0,0};
127 * This function will call the read callback to fill our buffer with data
130 static int fillbuffer(struct connectdata *conn,
133 int buffersize = bytes;
136 if(conn->bits.upload_chunky) {
137 /* if chunked Transfer-Encoding */
138 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
139 conn->upload_fromhere += 10; /* 32bit hex + CRLF */
142 nread = conn->fread(conn->upload_fromhere, 1,
143 buffersize, conn->fread_in);
145 if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
146 /* if chunked Transfer-Encoding */
148 int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
150 /* move buffer pointer */
151 conn->upload_fromhere -= hexlen;
154 /* copy the prefix to the buffer */
155 memcpy(conn->upload_fromhere, hexbuffer, hexlen);
157 /* always append CRLF to the data */
158 memcpy(conn->upload_fromhere + nread, "\r\n", 2);
160 if((nread - hexlen) == 0) {
161 /* mark this as done once this chunk is transfered */
162 conn->keep.upload_done = TRUE;
165 nread+=2; /* for the added CRLF */
173 * Returns TRUE if member of the list matches prefix of string
176 checkhttpprefix(struct SessionHandle *data,
179 struct curl_slist *head = data->set.http200aliases;
182 if (checkprefix(head->data, s))
187 if(checkprefix("HTTP/", s))
193 CURLcode Curl_readwrite(struct connectdata *conn,
196 struct Curl_transfer_keeper *k = &conn->keep;
197 struct SessionHandle *data = conn->data;
199 ssize_t nread; /* number of bytes read */
202 /* These two are used only if no other select() or _fdset() have been
203      invoked before this. This typically happens if you use the multi interface
204 and call curl_multi_perform() without calling curl_multi_fdset()
209 fd_set *readfdp = k->readfdp;
210 fd_set *writefdp = k->writefdp;
211 curl_off_t contentlength;
213 if((k->keepon & KEEP_READ) && !readfdp) {
214 /* reading is requested, but no socket descriptor pointer was set */
215 FD_ZERO(&extrareadfd);
216 FD_SET(conn->sockfd, &extrareadfd);
217 readfdp = &extrareadfd;
219 /* no write, no exceptions, no timeout */
220       select(conn->sockfd+1, readfdp, NULL, NULL, &notimeout);
222 if((k->keepon & KEEP_WRITE) && !writefdp) {
223 /* writing is requested, but no socket descriptor pointer was set */
224 FD_ZERO(&extrawritefd);
225 FD_SET(conn->writesockfd, &extrawritefd);
226 writefdp = &extrawritefd;
228 /* no read, no exceptions, no timeout */
229       select(conn->writesockfd+1, NULL, writefdp, NULL, &notimeout);
233 /* If we still have reading to do, we check if we have a readable
234        socket. Sometimes the readfdp is NULL, if no fd_set was done using
235 the multi interface and then we can do nothing but to attempt a
237 if((k->keepon & KEEP_READ) &&
238 (!readfdp || FD_ISSET(conn->sockfd, readfdp))) {
240 bool readdone = TRUE;
242 /* This is where we loop until we have read everything there is to
243 read or we get a EWOULDBLOCK */
245 int buffersize = data->set.buffer_size?
246 data->set.buffer_size:BUFSIZE -1;
248 /* receive data from the network! */
249 int readrc = Curl_read(conn, conn->sockfd, k->buf, buffersize, &nread);
251 /* subzero, this would've blocked */
253 break; /* get out of loop */
255 /* get the CURLcode from the int */
256 result = (CURLcode)readrc;
261 if ((k->bytecount == 0) && (k->writebytecount == 0)) {
262 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
263 if(k->wait100_after_headers)
264 /* set time stamp to compare with when waiting for the 100 */
265 k->start100 = Curl_tvnow();
268 didwhat |= KEEP_READ;
270 /* NULL terminate, allowing string ops to be used */
274 /* if we receive 0 or less here, the server closed the connection and
275 we bail out from this! */
276 else if (0 >= nread) {
277 k->keepon &= ~KEEP_READ;
278 FD_ZERO(&k->rkeepfd);
283 /* Default buffer to use when we write the buffer, it may be changed
284 in the flow below before the actual storing is done. */
287 /* Since this is a two-state thing, we check if we are parsing
288 headers at the moment or not. */
290 /* we are in parse-the-header-mode */
291 bool stop_reading = FALSE;
293 /* header line within buffer loop */
300 /* str_start is start of line within buf */
301 k->str_start = k->str;
303 k->end_ptr = strchr (k->str_start, '\n');
306 /* Not a complete header line within buffer, append the data to
307 the end of the headerbuff. */
309 if (k->hbuflen + nread >= data->state.headersize) {
310 /* We enlarge the header buffer as it is too small */
312 long newsize=CURLMAX((k->hbuflen+nread)*3/2,
313 data->state.headersize*2);
314 hbufp_index = k->hbufp - data->state.headerbuff;
315 newbuff = (char *)realloc(data->state.headerbuff, newsize);
317 failf (data, "Failed to alloc memory for big header!");
318 return CURLE_OUT_OF_MEMORY;
320 data->state.headersize=newsize;
321 data->state.headerbuff = newbuff;
322 k->hbufp = data->state.headerbuff + hbufp_index;
324 memcpy(k->hbufp, k->str, nread);
327 if (!k->headerline && (k->hbuflen>5)) {
328 /* make a first check that this looks like a HTTP header */
329 if(!checkhttpprefix(data, data->state.headerbuff)) {
330 /* this is not the beginning of a HTTP first header line */
332 k->badheader = HEADER_ALLBAD;
337 break; /* read more and try again */
340 /* decrease the size of the remaining (supposed) header line */
341 rest_length = (k->end_ptr - k->str)+1;
342 nread -= rest_length;
344 k->str = k->end_ptr + 1; /* move past new line */
346 full_length = k->str - k->str_start;
349 * We're about to copy a chunk of data to the end of the
350 * already received header. We make sure that the full string
351 * fit in the allocated header buffer, or else we enlarge
354 if (k->hbuflen + full_length >=
355 data->state.headersize) {
357 long newsize=CURLMAX((k->hbuflen+full_length)*3/2,
358 data->state.headersize*2);
359 hbufp_index = k->hbufp - data->state.headerbuff;
360 newbuff = (char *)realloc(data->state.headerbuff, newsize);
362 failf (data, "Failed to alloc memory for big header!");
363 return CURLE_OUT_OF_MEMORY;
365 data->state.headersize= newsize;
366 data->state.headerbuff = newbuff;
367 k->hbufp = data->state.headerbuff + hbufp_index;
370 /* copy to end of line */
371 strncpy (k->hbufp, k->str_start, full_length);
372 k->hbufp += full_length;
373 k->hbuflen += full_length;
375 k->end_ptr = k->hbufp;
377 k->p = data->state.headerbuff;
380 * We now have a FULL header line that p points to
384 /* the first read header */
386 !checkhttpprefix(data, data->state.headerbuff)) {
387 /* this is not the beginning of a HTTP first header line */
390 /* since there's more, this is a partial bad header */
391 k->badheader = HEADER_PARTHEADER;
393 /* this was all we read so its all a bad header */
394 k->badheader = HEADER_ALLBAD;
401 if (('\n' == *k->p) || ('\r' == *k->p)) {
403 /* Zero-length header line means end of headers! */
406 k->p++; /* pass the \r byte */
408 k->p++; /* pass the \n byte */
410 if(100 == k->httpcode) {
412 * We have made a HTTP PUT or POST and this is 1.1-lingo
413 * that tells us that the server is OK with this and ready
414 * to receive the data.
415 * However, we'll get more headers now so we must get
416 * back into the header-parsing state!
419 k->headerline = 0; /* restart the header line counter */
420 /* if we did wait for this do enable write now! */
421 if (k->write_after_100_header) {
423 k->write_after_100_header = FALSE;
424 FD_SET (conn->writesockfd, &k->writefd); /* write */
425 k->keepon |= KEEP_WRITE;
426 k->wkeepfd = k->writefd;
430 k->header = FALSE; /* no more header to parse! */
432 if (417 == k->httpcode) {
434 * we got: "417 Expectation Failed" this means:
435 * we have made a HTTP call and our Expect Header
436 * seems to cause a problem => abort the write operations
437 * (or prevent them from starting).
439 k->write_after_100_header = FALSE;
440 k->keepon &= ~KEEP_WRITE;
441 FD_ZERO(&k->wkeepfd);
444 /* now, only output this if the header AND body are requested:
446 writetype = CLIENTWRITE_HEADER;
447 if (data->set.http_include_header)
448 writetype |= CLIENTWRITE_BODY;
450 headerlen = k->p - data->state.headerbuff;
452 result = Curl_client_write(data, writetype,
453 data->state.headerbuff,
458 data->info.header_size += headerlen;
459 conn->headerbytecount += headerlen;
461 if (conn->resume_from &&
463 (data->set.httpreq==HTTPREQ_GET)) {
464 if(k->httpcode == 416) {
465 /* "Requested Range Not Satisfiable" */
469 /* we wanted to resume a download, although the server
470 doesn't seem to support this and we did this with a GET
471 (if it wasn't a GET we did a POST or PUT resume) */
472 failf (data, "HTTP server doesn't seem to support "
473 "byte ranges. Cannot resume.");
474 return CURLE_HTTP_RANGE_ERROR;
479 /* *auth_act() checks what authentication methods that are
480 available and decides which one (if any) to use. It will
481              set 'newurl' if an auth method was picked. */
482 Curl_http_auth_act(conn);
486 * really end-of-headers.
488 * If we requested a "no body", this is a good time to get
489 * out and return home.
491 if(data->set.no_body)
494 /* If we know the expected size of this document, we set the
495 maximum download size to the size of the expected
496 document or else, we won't know when to stop reading!
498 Note that we set the download maximum even if we read a
499 "Connection: close" header, to make sure that
500 "Content-Length: 0" still prevents us from attempting to
501 read the (missing) response-body.
503 /* According to RFC2616 section 4.4, we MUST ignore
504 Content-Length: headers if we are now receiving data
505 using chunked Transfer-Encoding.
510 if(-1 != conn->size) {
511 Curl_pgrsSetDownloadSize(data, conn->size);
512 conn->maxdownload = conn->size;
515 /* If max download size is *zero* (nothing) we already
516 have nothing and can safely return ok now! */
517 if(0 == conn->maxdownload)
521 /* we make sure that this socket isn't read more now */
522 k->keepon &= ~KEEP_READ;
523 FD_ZERO(&k->rkeepfd);
526 break; /* exit header line loop */
529 /* We continue reading headers, so reset the line-based
530 header parsing variables hbufp && hbuflen */
531 k->hbufp = data->state.headerbuff;
537 * Checks for special headers coming up.
540 if (!k->headerline++) {
541 /* This is the first header, it MUST be the error code line
542                  or else we consider this to be the body right away! */
543 int httpversion_major;
544 int nc=sscanf (k->p, " HTTP/%d.%d %3d",
549 k->httpversion += 10 * httpversion_major;
552 /* this is the real world, not a Nirvana
553 NCSA 1.5.x returns this crap when asked for HTTP/1.1
555 nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
558 /* If user has set option HTTP200ALIASES,
559 compare header line against list of aliases
562 if (checkhttpprefix(data, k->p)) {
566 (data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
572 data->info.httpcode = k->httpcode;
573 data->info.httpversion = k->httpversion;
575 /* 404 -> URL not found! */
576 if (data->set.http_fail_on_error &&
577 (k->httpcode >= 400)) {
578 /* If we have been told to fail hard on HTTP-errors,
579 here is the check for that: */
580 /* serious error, go home! */
581 failf (data, "The requested URL returned error: %d",
583 return CURLE_HTTP_RETURNED_ERROR;
586 if(k->httpversion == 10)
587 /* Default action for HTTP/1.0 must be to close, unless
588 we get one of those fancy headers that tell us the
589 server keeps it open for us! */
590 conn->bits.close = TRUE;
592 switch(k->httpcode) {
594 /* (quote from RFC2616, section 10.2.5): The server has
595 * fulfilled the request but does not need to return an
596 * entity-body ... The 204 response MUST NOT include a
597 * message-body, and thus is always terminated by the first
598 * empty line after the header fields. */
600 case 416: /* Requested Range Not Satisfiable, it has the
601 Content-Length: set as the "real" document but no
602 actual response is sent. */
604 /* (quote from RFC2616, section 10.3.5): The 304 response
605 * MUST NOT contain a message-body, and thus is always
606 * terminated by the first empty line after the header
617 k->header = FALSE; /* this is not a header line */
622 /* Check for Content-Length: header lines to get size. Ignore
623 the header completely if we get a 416 response as then we're
624 resuming a document that we don't get, and this header contains
625 info about the true size of the document we didn't get now. */
626 if ((k->httpcode != 416) &&
627 checkprefix("Content-Length:", k->p)) {
628 contentlength = strtoofft(k->p+15, NULL, 10);
629 if (data->set.max_filesize && contentlength >
630 data->set.max_filesize) {
631 failf(data, "Maximum file size exceeded");
632 return CURLE_FILESIZE_EXCEEDED;
634 conn->size = contentlength;
636 /* check for Content-Type: header lines to get the mime-type */
637 else if (checkprefix("Content-Type:", k->p)) {
642 /* Find the first non-space letter */
644 *start && isspace((int)*start);
647 end = strchr(start, '\r');
649 end = strchr(start, '\n');
652 /* skip all trailing space letters */
653 for(; isspace((int)*end) && (end > start); end--);
655 /* get length of the type */
658 /* allocate memory of a cloned copy */
659 Curl_safefree(data->info.contenttype);
661 data->info.contenttype = malloc(len + 1);
662 if (NULL == data->info.contenttype)
663 return CURLE_OUT_OF_MEMORY;
665 /* copy the content-type string */
666 memcpy(data->info.contenttype, start, len);
667 data->info.contenttype[len] = 0; /* zero terminate */
670 else if((k->httpversion == 10) &&
671 conn->bits.httpproxy &&
672 Curl_compareheader(k->p,
673 "Proxy-Connection:", "keep-alive")) {
675 * When a HTTP/1.0 reply comes when using a proxy, the
676 * 'Proxy-Connection: keep-alive' line tells us the
677 * connection will be kept alive for our pleasure.
678 * Default action for 1.0 is to close.
680 conn->bits.close = FALSE; /* don't close when done */
681 infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
683 else if((k->httpversion == 10) &&
684 Curl_compareheader(k->p, "Connection:", "keep-alive")) {
686 * A HTTP/1.0 reply with the 'Connection: keep-alive' line
687 * tells us the connection will be kept alive for our
688 * pleasure. Default action for 1.0 is to close.
690 * [RFC2068, section 19.7.1] */
691 conn->bits.close = FALSE; /* don't close when done */
692 infof(data, "HTTP/1.0 connection set to keep alive!\n");
694 else if (Curl_compareheader(k->p, "Connection:", "close")) {
696 * [RFC 2616, section 8.1.2.1]
697 * "Connection: close" is HTTP/1.1 language and means that
698 * the connection will close when this request has been
701 conn->bits.close = TRUE; /* close when done */
703 else if (Curl_compareheader(k->p,
704 "Transfer-Encoding:", "chunked")) {
706 * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
707 * means that the server will send a series of "chunks". Each
708 * chunk starts with line with info (including size of the
709 * coming block) (terminated with CRLF), then a block of data
710 * with the previously mentioned size. There can be any amount
711 * of chunks, and a chunk-data set to zero signals the
713 conn->bits.chunk = TRUE; /* chunks coming our way */
715 /* init our chunky engine */
716 Curl_httpchunk_init(conn);
718 else if (checkprefix("Content-Encoding:", k->p) &&
719 data->set.encoding) {
721 * Process Content-Encoding. Look for the values: identity,
722 * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
723 * x-compress are the same as gzip and compress. (Sec 3.5 RFC
724 * 2616). zlib cannot handle compress. However, errors are
725 * handled further down when the response body is processed
729 /* Find the first non-space letter */
731 *start && isspace((int)*start);
734 /* Record the content-encoding for later use */
735 if (checkprefix("identity", start))
736 k->content_encoding = IDENTITY;
737 else if (checkprefix("deflate", start))
738 k->content_encoding = DEFLATE;
739 else if (checkprefix("gzip", start)
740 || checkprefix("x-gzip", start))
741 k->content_encoding = GZIP;
742 else if (checkprefix("compress", start)
743 || checkprefix("x-compress", start))
744 k->content_encoding = COMPRESS;
746 else if (Curl_compareheader(k->p, "Content-Range:", "bytes")) {
747 /* Content-Range: bytes [num]-
748 Content-Range: bytes: [num]-
750 The second format was added August 1st 2000 by Igor
751 Khristophorov since Sun's webserver JavaWebServer/1.1.1
752 obviously sends the header this way! :-( */
754 char *ptr = strstr(k->p, "bytes");
758 /* stupid colon skip */
761 k->offset = strtoofft(ptr, NULL, 10);
763 if (conn->resume_from == k->offset)
764 /* we asked for a resume and we got it */
765 k->content_range = TRUE;
767 else if(data->cookies &&
768 checkprefix("Set-Cookie:", k->p)) {
769 Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
770 CURL_LOCK_ACCESS_SINGLE);
771 Curl_cookie_add(data,
772 data->cookies, TRUE, k->p+11,
773 /* If there is a custom-set Host: name, use it
774 here, or else use real peer host name. */
775 conn->allocptr.cookiehost?
776 conn->allocptr.cookiehost:conn->name,
778 Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
780 else if(checkprefix("Last-Modified:", k->p) &&
781 (data->set.timecondition || data->set.get_filetime) ) {
782 time_t secs=time(NULL);
783 k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
785 if(data->set.get_filetime)
786 data->info.filetime = k->timeofdoc;
788 else if((checkprefix("WWW-Authenticate:", k->p) &&
789 (401 == k->httpcode)) ||
790 (checkprefix("Proxy-authenticate:", k->p) &&
791 (407 == k->httpcode))) {
792 result = Curl_http_auth(conn, k->httpcode, k->p);
796 else if ((k->httpcode >= 300 && k->httpcode < 400) &&
797 checkprefix("Location:", k->p)) {
798 if(data->set.http_follow_location) {
799 /* this is the URL that the server advices us to get instead */
804 start += 9; /* pass "Location:" */
806 /* Skip spaces and tabs. We do this to support multiple
807 white spaces after the "Location:" keyword. */
808 while(*start && isspace((int)*start ))
811 /* Scan through the string from the end to find the last
812 non-space. k->end_ptr points to the actual terminating zero
813 letter, move pointer one letter back and start from
814 there. This logic strips off trailing whitespace, but keeps
815 any embedded whitespace. */
817 while((ptr>=start) && isspace((int)*ptr))
821 backup = *ptr; /* store the ending letter */
823 *ptr = '\0'; /* zero terminate */
824 conn->newurl = strdup(start); /* clone string */
825 *ptr = backup; /* restore ending letter */
828 #if 0 /* for consideration */
830 /* This is a Location: but we have not been instructed to
832 infof(data, "We ignore this location header as instructed\n");
838 * End of header-checks. Write them to the client.
841 writetype = CLIENTWRITE_HEADER;
842 if (data->set.http_include_header)
843 writetype |= CLIENTWRITE_BODY;
845 if(data->set.verbose)
846 Curl_debug(data, CURLINFO_HEADER_IN,
849 result = Curl_client_write(data, writetype, k->p, k->hbuflen);
853 data->info.header_size += k->hbuflen;
854 conn->headerbytecount += k->hbuflen;
856 /* reset hbufp pointer && hbuflen */
857 k->hbufp = data->state.headerbuff;
860 while (!stop_reading && *k->str); /* header line within buffer */
863 /* We've stopped dealing with input, get out of the do-while loop */
866 /* We might have reached the end of the header part here, but
867 there might be a non-header part left in the end of the read
870 } /* end if header mode */
872 /* This is not an 'else if' since it may be a rest from the header
873 parsing, where the beginning of the buffer is headers and the end
875 if (k->str && !k->header && (nread > 0)) {
877 if(0 == k->bodywrites) {
878 /* These checks are only made the first time we are about to
879 write a piece of the body */
880 if(conn->protocol&PROT_HTTP) {
881 /* HTTP-only checks */
884 if(conn->bits.close) {
885 /* Abort after the headers if "follow Location" is set
886 and we're set to close anyway. */
887 k->keepon &= ~KEEP_READ;
888 FD_ZERO(&k->rkeepfd);
892 /* We have a new url to load, but since we want to be able
893 to re-use this connection properly, we read the full
894 response in "ignore more" */
895 k->ignorebody = TRUE;
896 infof(data, "Ignoring the response-body\n");
898 if(data->set.timecondition && !conn->range) {
899 /* A time condition has been set AND no ranges have been
900 requested. This seems to be what chapter 13.3.4 of
901 RFC 2616 defines to be the correct action for a
903 if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
904 switch(data->set.timecondition) {
905 case CURL_TIMECOND_IFMODSINCE:
907 if(k->timeofdoc < data->set.timevalue) {
909 "The requested document is not new enough\n");
914 case CURL_TIMECOND_IFUNMODSINCE:
915 if(k->timeofdoc > data->set.timevalue) {
917 "The requested document is not old enough\n");
923 } /* two valid time strings */
924 } /* we have a time condition */
927 } /* this is the first time we write a body part */
930 /* pass data to the debug function before it gets "dechunked" */
931 if(data->set.verbose) {
933 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
935 if(k->badheader == HEADER_PARTHEADER)
936 Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
939 Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
942 if(conn->bits.chunk) {
944 * Bless me father for I have sinned. Here comes a chunked
945 * transfer flying and we need to decode this properly. While
946 * the name says read, this function both reads and writes away
947 * the data. The returned 'nread' holds the number of actual
948 * data it wrote to the client. */
950 Curl_httpchunk_read(conn, k->str, nread, &nread);
952 if(CHUNKE_OK < res) {
953 if(CHUNKE_WRITE_ERROR == res) {
954 failf(data, "Failed writing data");
955 return CURLE_WRITE_ERROR;
957 failf(data, "Received problem %d in the chunky parser", res);
958 return CURLE_RECV_ERROR;
960 else if(CHUNKE_STOP == res) {
961 /* we're done reading chunks! */
962 k->keepon &= ~KEEP_READ; /* read no more */
963 FD_ZERO(&k->rkeepfd);
965 /* There are now possibly N number of bytes at the end of the
966 str buffer that weren't written to the client, but we don't
967 care about them right now. */
969 /* If it returned OK, we just keep going */
972 if((-1 != conn->maxdownload) &&
973 (k->bytecount + nread >= conn->maxdownload)) {
974 nread = (ssize_t) (conn->maxdownload - k->bytecount);
975 if(nread < 0 ) /* this should be unusual */
978 k->keepon &= ~KEEP_READ; /* we're done reading */
979 FD_ZERO(&k->rkeepfd);
982 k->bytecount += nread;
984 Curl_pgrsSetDownloadCounter(data, k->bytecount);
986 if(!conn->bits.chunk && (nread || k->badheader)) {
987 /* If this is chunky transfer, it was already written */
989 if(k->badheader && !k->ignorebody) {
990 /* we parsed a piece of data wrongly assuming it was a header
991 and now we output it as body instead */
992 result = Curl_client_write(data, CLIENTWRITE_BODY,
993 data->state.headerbuff,
996 if(k->badheader < HEADER_ALLBAD) {
997 /* This switch handles various content encodings. If there's an
998 error here, be sure to check over the almost identical code
1000 Make sure that ALL_CONTENT_ENCODINGS contains all the
1001 encodings handled here. */
1003 switch (k->content_encoding) {
1006 /* This is the default when the server sends no
1007 Content-Encoding header. See Curl_readwrite_init; the
1008 memset() call initializes k->content_encoding to zero. */
1010 result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
1016 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
1017 result = Curl_unencode_deflate_write(data, k, nread);
1021 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
1022 result = Curl_unencode_gzip_write(data, k, nread);
1027 failf (data, "Unrecognized content encoding type. "
1028 "libcurl understands `identity', `deflate' and `gzip' "
1029 "content encodings.");
1030 result = CURLE_BAD_CONTENT_ENCODING;
1035 k->badheader = HEADER_NORMAL; /* taken care of now */
1041 } /* if (! header and data to read ) */
1045 } /* if( read from socket ) */
1047 /* If we still have writing to do, we check if we have a writable
1048 socket. Sometimes the writefdp is NULL, if no fd_set was done using
1049 the multi interface and then we can do nothing but to attempt a
1050 write to be sure. */
1051 if((k->keepon & KEEP_WRITE) &&
1052 (!writefdp || FD_ISSET(conn->writesockfd, writefdp)) ) {
1056 ssize_t bytes_written;
1057 bool writedone=TRUE;
1059 if ((k->bytecount == 0) && (k->writebytecount == 0))
1060 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
1062 didwhat |= KEEP_WRITE;
1065 * We loop here to do the READ and SEND loop until we run out of
1066 * data to send or until we get EWOULDBLOCK back
1070 /* only read more data if there's no upload data already
1071 present in the upload buffer */
1072 if(0 == conn->upload_present) {
1073 /* init the "upload from here" pointer */
1074 conn->upload_fromhere = k->uploadbuf;
1076 if(!k->upload_done) {
1077 /* HTTP pollution, this should be written nicer to become more
1078 protocol agnostic. */
1080 if(k->wait100_after_headers &&
1081 (conn->proto.http->sending == HTTPSEND_BODY)) {
1082 /* If this call is to send body data, we must take some action:
1083 We have sent off the full HTTP 1.1 request, and we shall now
1084 go into the Expect: 100 state and await such a header */
1085 k->wait100_after_headers = FALSE; /* headers sent */
1086 k->write_after_100_header = TRUE; /* wait for the header */
1087 FD_ZERO (&k->writefd); /* clear it */
1088 k->wkeepfd = k->writefd; /* set the keeper variable */
1089 k->keepon &= ~KEEP_WRITE; /* disable writing */
1090 k->start100 = Curl_tvnow(); /* timeout count starts now */
1091 didwhat &= ~KEEP_WRITE; /* we didn't write anything actually */
1095 nread = fillbuffer(conn, BUFSIZE);
1098 nread = 0; /* we're done uploading/reading */
1100 /* the signed int typecase of nread of for systems that has
1104 k->keepon &= ~KEEP_WRITE; /* we're done writing */
1105 FD_ZERO(&k->wkeepfd);
1110 /* store number of bytes available for upload */
1111 conn->upload_present = nread;
1113 /* convert LF to CRLF if so asked */
1114 if (data->set.crlf) {
1115 if(data->state.scratch == NULL)
1116 data->state.scratch = malloc(2*BUFSIZE);
1117 if(data->state.scratch == NULL) {
1118 failf (data, "Failed to alloc scratch buffer!");
1119 return CURLE_OUT_OF_MEMORY;
1121 for(i = 0, si = 0; i < nread; i++, si++) {
1122 if (conn->upload_fromhere[i] == 0x0a) {
1123 data->state.scratch[si++] = 0x0d;
1124 data->state.scratch[si] = 0x0a;
1127 data->state.scratch[si] = conn->upload_fromhere[i];
1130 /* only perform the special operation if we really did replace
1134 /* upload from the new (replaced) buffer instead */
1135 conn->upload_fromhere = data->state.scratch;
1137 /* set the new amount too */
1138 conn->upload_present = nread;
1143 /* We have a partial buffer left from a previous "round". Use
1144 that instead of reading more data */
1147 /* write to socket (send away data) */
1148 result = Curl_write(conn,
1149 conn->writesockfd, /* socket to send to */
1150 conn->upload_fromhere, /* buffer pointer */
1151 conn->upload_present, /* buffer size */
1152 &bytes_written); /* actually send away */
1156 if(data->set.verbose)
1157 /* show the data before we change the pointer upload_fromhere */
1158 Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
1161 if(conn->upload_present != bytes_written) {
1162 /* we only wrote a part of the buffer (if anything), deal with it! */
1164 /* store the amount of bytes left in the buffer to write */
1165 conn->upload_present -= bytes_written;
1167 /* advance the pointer where to find the buffer when the next send
1169 conn->upload_fromhere += bytes_written;
1171 writedone = TRUE; /* we are done, stop the loop */
1174 /* we've uploaded that buffer now */
1175 conn->upload_fromhere = k->uploadbuf;
1176 conn->upload_present = 0; /* no more bytes left */
1178 if(k->upload_done) {
1179 /* switch off writing, we're done! */
1180 k->keepon &= ~KEEP_WRITE; /* we're done writing */
1181 FD_ZERO(&k->wkeepfd);
1186 k->writebytecount += bytes_written;
1187 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1189 } while(!writedone); /* loop until we're done writing! */
1193 } while(0); /* just to break out from! */
1195 k->now = Curl_tvnow();
1197 /* Update read/write counters */
1198 if(conn->bytecountp)
1199 *conn->bytecountp = k->bytecount; /* read count */
1200 if(conn->writebytecountp)
1201 *conn->writebytecountp = k->writebytecount; /* write count */
1204 /* no read no write, this is a timeout? */
1205 if (k->write_after_100_header) {
1206 /* This should allow some time for the header to arrive, but only a
1207 very short time as otherwise it'll be too much wasted times too
1210 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1212 Therefore, when a client sends this header field to an origin server
1213 (possibly via a proxy) from which it has never seen a 100 (Continue)
1214 status, the client SHOULD NOT wait for an indefinite period before
1215 sending the request body.
1219 int ms = Curl_tvdiff(k->now, k->start100);
1220 if(ms > CURL_TIMEOUT_EXPECT_100) {
1221 /* we've waited long enough, continue anyway */
1222 k->write_after_100_header = FALSE;
1223 FD_SET (conn->writesockfd, &k->writefd); /* write socket */
1224 k->keepon |= KEEP_WRITE;
1225 k->wkeepfd = k->writefd;
1230 if(Curl_pgrsUpdate(conn))
1231 result = CURLE_ABORTED_BY_CALLBACK;
1233 result = Curl_speedcheck (data, k->now);
1237 if (data->set.timeout &&
1238 ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
1239 failf (data, "Operation timed out with %" FORMAT_OFF_T
1240 " out of %" FORMAT_OFF_T " bytes received",
1241 k->bytecount, conn->size);
1242 return CURLE_OPERATION_TIMEOUTED;
1247 * The transfer has been performed. Just make some general checks before
1251 if(!(data->set.no_body) && (conn->size != -1) &&
1252 (k->bytecount != conn->size) &&
1254 failf(data, "transfer closed with %" FORMAT_OFF_T
1255 " bytes remaining to read",
1256 conn->size - k->bytecount);
1257 return CURLE_PARTIAL_FILE;
1259 else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
1260 failf(data, "transfer closed with at least %d bytes remaining",
1261 conn->proto.http->chunk.datasize);
1262 return CURLE_PARTIAL_FILE;
1264 if(Curl_pgrsUpdate(conn))
1265 return CURLE_ABORTED_BY_CALLBACK;
1268 /* Now update the "done" boolean we return */
1274 CURLcode Curl_readwrite_init(struct connectdata *conn)
1276 struct SessionHandle *data = conn->data;
1277 struct Curl_transfer_keeper *k = &conn->keep;
1279 /* NB: the content encoding software depends on this initialization of
1280 Curl_transfer_keeper. */
1281 memset(k, 0, sizeof(struct Curl_transfer_keeper));
1283 k->start = Curl_tvnow(); /* start time */
1284 k->now = k->start; /* current time is now */
1285 k->header = TRUE; /* assume header */
1286 k->httpversion = -1; /* unknown at this point */
1288 data = conn->data; /* there's the root struct */
1289 k->buf = data->state.buffer;
1290 k->uploadbuf = data->state.uploadbuffer;
1291 k->maxfd = (conn->sockfd>conn->writesockfd?
1292 conn->sockfd:conn->writesockfd)+1;
1293 k->hbufp = data->state.headerbuff;
1294 k->ignorebody=FALSE;
1296 Curl_pgrsTime(data, TIMER_PRETRANSFER);
1297 Curl_speedinit(data);
1299 Curl_pgrsSetUploadCounter(data, 0);
1300 Curl_pgrsSetDownloadCounter(data, 0);
1302 if (!conn->bits.getheader) {
1305 Curl_pgrsSetDownloadSize(data, conn->size);
1307 /* we want header and/or body, if neither then don't do this! */
1308 if(conn->bits.getheader || !data->set.no_body) {
1310 FD_ZERO (&k->readfd); /* clear it */
1311 if(conn->sockfd != CURL_SOCKET_BAD) {
1312 FD_SET (conn->sockfd, &k->readfd); /* read socket */
1313 k->keepon |= KEEP_READ;
1316 FD_ZERO (&k->writefd); /* clear it */
1317 if(conn->writesockfd != CURL_SOCKET_BAD) {
1320 Even if we require a 100-return code before uploading data, we might
1321 need to write data before that since the REQUEST may not have been
1322 finished sent off just yet.
1324 Thus, we must check if the request has been sent before we set the
1325 state info where we wait for the 100-return code
1327 if (data->set.expect100header &&
1328 (conn->proto.http->sending == HTTPSEND_BODY)) {
1329 /* wait with write until we either got 100-continue or a timeout */
1330 k->write_after_100_header = TRUE;
1331 k->start100 = k->start;
1334 if(data->set.expect100header)
1335 /* when we've sent off the rest of the headers, we must await a
1337 k->wait100_after_headers = TRUE;
1338 FD_SET (conn->writesockfd, &k->writefd); /* write socket */
1339 k->keepon |= KEEP_WRITE;
1343 /* get these in backup variables to be able to restore them on each lap in
1344 the select() loop */
1345 k->rkeepfd = k->readfd;
1346 k->wkeepfd = k->writefd;
/* Add this connection's active sockets to the caller-provided fd_sets and
   report the highest file descriptor seen (-1 if none is active). Which
   sets are populated depends on the KEEP_READ/KEEP_WRITE bits in
   conn->keep.keepon. */
1353 void Curl_single_fdset(struct connectdata *conn,
1354 fd_set *read_fd_set,
1355 fd_set *write_fd_set,
1359 *max_fd = -1; /* init */
1360 if(conn->keep.keepon & KEEP_READ) {
1361 FD_SET(conn->sockfd, read_fd_set);
1362 *max_fd = conn->sockfd;
1363 conn->keep.readfdp = read_fd_set; /* store the address of the set */
1365 if(conn->keep.keepon & KEEP_WRITE) {
1366 FD_SET(conn->writesockfd, write_fd_set);
1368 /* since sockets are curl_socket_t nowadays, we typecast it to int here
1369 to compare it nicely */
1370 if((int)conn->writesockfd > *max_fd)
1371 *max_fd = conn->writesockfd;
1372 conn->keep.writefdp = write_fd_set; /* store the address of the set */
1374 /* we don't use exceptions, only touch that one to prevent compiler
1376 *exc_fd_set = *exc_fd_set; /* self-assignment just silences the warning */
1383 * This function is what performs the actual transfer. It is capable of
1384 * doing both ways simultaneously.
1385 * The transfer must already have been set up by a call to Curl_Transfer().
1387 * Note that headers are created in a preallocated buffer of a default size.
1388 * That buffer can be enlarged on demand, but it is never shrunk again.
1390 * Parts of this function were once written by the friendly Mark Butler
1391 * <butlerm@xmission.com>.
/* Drive the blocking (easy-interface) transfer loop: select() on the
   transfer's sockets with a one-second interval and call Curl_readwrite()
   until it signals that the transfer is done. */
1395 Transfer(struct connectdata *conn)
1397 struct SessionHandle *data = conn->data;
1399 struct Curl_transfer_keeper *k = &conn->keep;
1402 if(!(conn->protocol & PROT_FILE))
1403 /* Only do this if we are not transferring FILE:, since the file: treatment
1405 Curl_readwrite_init(conn);
1407 if((conn->sockfd == CURL_SOCKET_BAD) && (conn->writesockfd == CURL_SOCKET_BAD))
1408 /* nothing to read, nothing to write, we're already OK! */
1411 /* we want header and/or body, if neither then don't do this! */
1412 if(!conn->bits.getheader && data->set.no_body)
1415 k->writefdp = &k->writefd; /* store the address of the set */
1416 k->readfdp = &k->readfd; /* store the address of the set */
1419 struct timeval interval;
1420 k->readfd = k->rkeepfd; /* set these every lap in the loop */
1421 k->writefd = k->wkeepfd;
/* short select() timeout so progress/timeout bookkeeping runs regularly */
1422 interval.tv_sec = 1;
1423 interval.tv_usec = 0;
1425 switch (select (k->maxfd, k->readfdp, k->writefdp, NULL, &interval)) {
1426 case -1: /* select() error, stop reading */
1428 /* The EINTR is not serious, and it seems you might get this more
1429 often when using the lib in a multi-threaded environment! */
1434 done = TRUE; /* no more read or write */
1436 case 0: /* timeout */
1437 default: /* readable descriptors */
1438 result = Curl_readwrite(conn, &done);
1444 /* "done" signals to us if the transfer(s) are ready */
/* One-time setup performed before a transfer (or chain of redirected
   transfers) starts: validate the URL, init the SSL session cache, reset
   follow/auth state, read any pending cookie files, and arrange for
   SIGPIPE to be ignored where needed. */
1450 CURLcode Curl_pretransfer(struct SessionHandle *data)
1452 if(!data->change.url)
1453 /* we can't do anything without URL */
1454 return CURLE_URL_MALFORMAT;
1458 /* Init the SSL session ID cache here. We do it here since we want to do
1459 it after the *_setopt() calls (that could change the size of the cache)
1460 but before any transfer takes place. */
1461 CURLcode res = Curl_SSL_InitSessions(data, data->set.ssl.numsessions);
1467 data->set.followlocation=0; /* reset the location-follow counter */
1468 data->state.this_is_a_follow = FALSE; /* reset this */
1469 data->state.errorbuf = FALSE; /* no error has occurred */
1471 /* set preferred authentication, default to basic */
1473 data->state.authstage = 0; /* initialize authentication later */
1475 /* If there was a list of cookie files to read and we haven't done it before,
1477 if(data->change.cookielist) {
1478 struct curl_slist *list = data->change.cookielist;
/* cookie jar is shareable between handles, so lock while we load files */
1479 Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
1481 data->cookies = Curl_cookie_init(data,
1484 data->set.cookiesession);
1487 Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
1488 curl_slist_free_all(data->change.cookielist); /* clean up list */
1489 data->change.cookielist = NULL; /* don't do this again! */
1494 /* Allow data->set.use_port to set which port to use. This needs to be
1495 * disabled for example when we follow Location: headers to URLs using
1496 * different ports! */
1497 data->state.allow_port = TRUE;
1499 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1500 /*************************************************************
1501 * Tell signal handler to ignore SIGPIPE
1502 *************************************************************/
1503 if(!data->set.no_signal)
1504 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1507 Curl_initinfo(data); /* reset session-specific information "variables" */
1508 Curl_pgrsStartNow(data);
/* Undo what Curl_pretransfer() set up: restore the previous SIGPIPE
   handler (when signal handling is in use at all). */
1513 CURLcode Curl_posttransfer(struct SessionHandle *data)
1515 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1516 /* restore the signal handler for SIGPIPE before we get back */
1517 if(!data->set.no_signal)
1518 signal(SIGPIPE, data->state.prev_signal)
1520 (void)data; /* unused parameter */
/* Return the length 'url' will occupy after space-escaping as done by
   strcpy_url(): each space becomes "%20" to the left of any '?' and "+"
   to the right of it. Callers use this to size the output buffer. */
1526 static int strlen_url(char *url)
1530 bool left=TRUE; /* left side of the ? */
1532 for(ptr=url; *ptr; ptr++) {
/* Copy 'url' into 'output' while replacing spaces: "%20" on the left side
   of a '?' and '+' on the right side. 'output' must be large enough —
   size it with strlen_url() on the same string. */
1550 static void strcpy_url(char *output, char *url)
1552 /* we must add this with whitespace-replacing */
1555 char *optr = output;
1556 for(iptr = url; /* read from here */
1557 *iptr; /* until zero byte */
1567 *optr++='%'; /* add a '%' */
1568 *optr++='2'; /* add a '2' */
1569 *optr++='0'; /* add a '0' */
1572 *optr++='+'; /* add a '+' here */
1576 *optr=0; /* zero terminate output buffer */
/* Act on a redirect: take ownership of the malloc()ed Location string
   'newurl', turn a relative URL into an absolute one against the current
   URL, install it as the next URL to request, and adjust the request
   method according to the HTTP response code (301/302/303 force GET for
   POSTs, per common user-agent behavior). Enforces the maxredirs limit. */
1580 CURLcode Curl_follow(struct SessionHandle *data,
1581 char *newurl) /* this 'newurl' is the Location: string,
1582 and it must be malloc()ed before passed
1585 /* Location: redirect */
1586 char prot[16]; /* URL protocol string storage */
1587 char letter; /* used for a silly sscanf */
1591 if (data->set.maxredirs &&
1592 (data->set.followlocation >= data->set.maxredirs)) {
1593 failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
1594 return CURLE_TOO_MANY_REDIRECTS;
1597 /* mark the next request as a followed location: */
1598 data->state.this_is_a_follow = TRUE;
1600 data->set.followlocation++; /* count location-followers */
1602 if(data->set.http_auto_referer) {
1603 /* We are asked to automatically set the previous URL as the
1604 referer when we get the next URL. We pick the ->url field,
1605 which may or may not be 100% correct */
1607 if(data->change.referer_alloc)
1608 /* If we already have an allocated referer, free this first */
1609 free(data->change.referer);
1611 data->change.referer = strdup(data->change.url);
1612 data->change.referer_alloc = TRUE; /* yes, free this later */
/* a "scheme://" prefix means the Location is already absolute */
1615 if(2 != sscanf(newurl, "%15[^?&/:]://%c", prot, &letter)) {
1617 *DANG* this is an RFC 2068 violation. The URL is supposed
1618 to be absolute and this doesn't seem to be that!
1620 Instead, we have to TRY to append this new path to the old URL
1621 to the right of the host part. Oh crap, this is doomed to cause
1622 problems in the future...
1627 char *useurl = newurl;
1630 /* we must make our own copy of the URL to play with, as it may
1631 point to read-only data */
1632 char *url_clone=strdup(data->change.url);
1635 return CURLE_OUT_OF_MEMORY; /* skip out of this NOW */
1637 /* protsep points to the start of the host name */
1638 protsep=strstr(url_clone, "//");
1642 protsep+=2; /* pass the slashes */
1644 if('/' != newurl[0]) {
1647 /* First we need to find out if there's a ?-letter in the URL,
1648 and cut it and the right-side of that off */
1649 pathsep = strrchr(protsep, '?');
1653 /* we have a relative path to append to the last slash if
1654 there's one available */
1655 pathsep = strrchr(protsep, '/');
1659 /* Check if there's any slash after the host name, and if so,
1660 remember that position instead */
1661 pathsep = strchr(protsep, '/');
1663 protsep = pathsep+1;
1667 /* now deal with one "./" or any amount of "../" in the newurl
1668 and act accordingly */
1670 if((useurl[0] == '.') && (useurl[1] == '/'))
1671 useurl+=2; /* just skip the "./" */
1673 while((useurl[0] == '.') &&
1674 (useurl[1] == '.') &&
1675 (useurl[2] == '/')) {
1677 useurl+=3; /* pass the "../" */
1682 /* cut off one more level from the right of the original URL */
1683 pathsep = strrchr(protsep, '/');
1694 /* We got a new absolute path for this server, cut off from the
1696 pathsep = strchr(protsep, '/');
1700 /* There was no slash. Now, since we might be operating on a badly
1701 formatted URL, such as "http://www.url.com?id=2380" which doesn't
1702 use a slash separator as it is supposed to, we need to check for a
1703 ?-letter as well! */
1704 pathsep = strchr(protsep, '?');
1710 /* If the new part contains a space, this is a mighty stupid redirect
1711 but we still make an effort to do "right". To the left of a '?'
1712 letter we replace each space with %20 while it is replaced with '+'
1713 on the right side of the '?' letter.
1715 newlen = strlen_url(useurl);
1717 urllen = strlen(url_clone);
1719 newest=(char *)malloc( urllen + 1 + /* possible slash */
1720 newlen + 1 /* zero byte */);
1723 return CURLE_OUT_OF_MEMORY; /* go out from this */
1725 /* copy over the root url part */
1726 memcpy(newest, url_clone, urllen);
1728 /* check if we need to append a slash */
1729 if(('/' == useurl[0]) || (protsep && !*protsep))
1732 newest[urllen++]='/';
1734 /* then append the new piece on the right side */
1735 strcpy_url(&newest[urllen], useurl);
1737 free(newurl); /* newurl is the allocated pointer */
1742 /* This is an absolute URL, don't allow the custom port number */
1743 data->state.allow_port = FALSE;
1745 if(strchr(newurl, ' ')) {
1746 /* This new URL contains at least one space, this is a mighty stupid
1747 redirect but we still make an effort to do "right". */
1748 newlen = strlen_url(newurl);
1750 newest = malloc(newlen+1); /* get memory for this */
1752 strcpy_url(newest, newurl); /* create a space-free URL */
1754 free(newurl); /* that was no good */
1755 newurl = newest; /* use this instead now */
1761 if(data->change.url_alloc)
1762 free(data->change.url);
1764 data->change.url_alloc = TRUE; /* the URL is allocated */
1766 data->change.url = newurl;
1767 newurl = NULL; /* don't free! */
1769 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1772 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1773 * differently based on exactly what return code there was.
1775 * News from 7.10.6: we can also get here on a 401, in case we act on a
1776 * HTTP authentication scheme other than Basic.
1778 switch(data->info.httpcode) {
1780 /* Act on an authentication, we keep on moving and do the Authorization:
1781 XXXX header in the HTTP request code snippet */
1783 case 300: /* Multiple Choices */
1784 case 306: /* Not used */
1785 case 307: /* Temporary Redirect */
1786 default: /* for all unknown ones */
1787 /* These are explicitly mentioned since I've checked RFC2616 and they
1788 * seem to be OK to POST to.
1791 case 301: /* Moved Permanently */
1792 /* (quote from RFC2616, section 10.3.2):
1794 * Note: When automatically redirecting a POST request after
1795 * receiving a 301 status code, some existing HTTP/1.0 user agents
1796 * will erroneously change it into a GET request.
1799 * Warning: Because most important user agents do this clear
1800 * RFC2616 violation, many webservers expect this misbehavior. So
1801 * these servers often answer to a POST request with an error page.
1802 * To be sure that libcurl gets the page that most user agents
1803 * would get, libcurl has to force GET:
1805 if( data->set.httpreq == HTTPREQ_POST
1806 || data->set.httpreq == HTTPREQ_POST_FORM) {
1808 "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
1809 data->set.httpreq = HTTPREQ_GET;
1812 case 302: /* Found */
1815 Note: RFC 1945 and RFC 2068 specify that the client is not allowed
1816 to change the method on the redirected request. However, most
1817 existing user agent implementations treat 302 as if it were a 303
1818 response, performing a GET on the Location field-value regardless
1819 of the original request method. The status codes 303 and 307 have
1820 been added for servers that wish to make unambiguously clear which
1821 kind of reaction is expected of the client.
1825 Note: Many pre-HTTP/1.1 user agents do not understand the 303
1826 status. When interoperability with such clients is a concern, the
1827 302 status code may be used instead, since most user agents react
1828 to a 302 response as described here for 303.
1830 case 303: /* See Other */
1831 /* Disable both types of POSTs, since doing a second POST when
1832 * following isn't what anyone would want! */
1833 if(data->set.httpreq != HTTPREQ_GET) {
1834 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1835 infof(data, "Disables POST, goes with %s\n",
1836 data->set.no_body?"HEAD":"GET");
1839 case 304: /* Not Modified */
1840 /* 304 means we did a conditional request and it was "Not modified".
1841 * We shouldn't get any Location: header in this response!
1844 case 305: /* Use Proxy */
1845 /* (quote from RFC2616, section 10.3.6):
1846 * "The requested resource MUST be accessed through the proxy given
1847 * by the Location field. The Location field gives the URI of the
1848 * proxy. The recipient is expected to repeat this single request
1849 * via the proxy. 305 responses MUST only be generated by origin
/* restart the redirect-phase timers for the upcoming request */
1854 Curl_pgrsTime(data, TIMER_REDIRECT);
1855 Curl_pgrsResetTimes(data);
/* The top-level easy-interface transfer driver: connect, perform the
   transfer, run Curl_done(), and loop via Curl_follow() as long as a
   redirect (or a connection-reset retry) produces a new URL. All exits
   fall through to the common cleanup at the end — no early returns. */
1860 CURLcode Curl_perform(struct SessionHandle *data)
1864 struct connectdata *conn=NULL;
1865 char *newurl = NULL; /* possibly a new URL to follow to! */
1867 data->state.used_interface = Curl_if_easy;
1869 res = Curl_pretransfer(data);
1874 * It is important that there is NO 'return' from this function at any other
1875 * place than falling down to the end of the function! This is because we
1876 * have cleanup stuff that must be done before we get back, and that is only
1877 * performed after this do-while loop.
1881 int urlchanged = FALSE;
1884 Curl_pgrsTime(data, TIMER_STARTSINGLE);
1885 data->change.url_changed = FALSE;
1886 res = Curl_connect(data, &conn, &async);
1888 if((CURLE_OK == res) && async) {
1889 /* Now, if async is TRUE here, we need to wait for the name
1891 res = Curl_wait_for_resolv(conn, NULL);
1893 /* Resolved, continue with the connection */
1894 res = Curl_async_resolved(conn);
1899 /* If a callback (or something) has altered the URL we should use within
1900 the Curl_connect(), we detect it here and act as if we are redirected
1902 urlchanged = data->change.url_changed;
1903 if ((CURLE_OK == res) && urlchanged) {
1904 res = Curl_done(conn);
1905 if(CURLE_OK == res) {
1906 char *gotourl = strdup(data->change.url);
1907 res = Curl_follow(data, gotourl);
1912 } while (urlchanged && res == CURLE_OK) ;
1914 if(res == CURLE_OK) {
1915 res = Curl_do(&conn);
1917 if(res == CURLE_OK) {
1918 res = Transfer(conn); /* now fetch that URL please */
1919 if(res == CURLE_OK) {
1921 if((conn->keep.bytecount == 0) &&
1922 (conn->sockerror == ECONNRESET) &&
1924 /* We got no data, the connection was reset and we did attempt
1925 to re-use a connection. This smells like we were too fast to
1926 re-use a connection that was closed when we wanted to read
1927 from it. Bad luck. Let's simulate a redirect to the same URL
1929 infof(data, "Connection reset, retrying a fresh connect\n");
1930 newurl = strdup(conn->data->change.url);
1932 conn->bits.close = TRUE; /* close this connection */
1933 conn->bits.retry = TRUE; /* mark this as a connection we're about
1934 to retry. Marking it this way should
1935 prevent i.e HTTP transfers to return
1936 error just because nothing has been
1941 * We must duplicate the new URL here as the connection data
1942 * may be free()ed in the Curl_done() function.
1944 newurl = conn->newurl?strdup(conn->newurl):NULL;
1947 /* The transfer phase returned error, we mark the connection to get
1948 * closed to prevent being re-used. This is because we can't
1949 * possibly know if the connection is in a good shape or not now. */
1950 conn->bits.close = TRUE;
1952 if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) {
1953 /* if we failed anywhere, we must clean up the secondary socket if
1955 sclose(conn->sock[SECONDARYSOCKET]);
1956 conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD;
1960 /* Always run Curl_done(), even if some of the previous calls
1961 failed, but return the previous (original) error code */
1962 res2 = Curl_done(conn);
1969 * Important: 'conn' cannot be used here, since it may have been closed
1970 * in 'Curl_done' or other functions.
1973 if((res == CURLE_OK) && newurl) {
1974 res = Curl_follow(data, newurl);
1975 if(CURLE_OK == res) {
1981 break; /* it only reaches here when this shouldn't loop */
1983 } while(1); /* loop if Location: */
1988 /* run post-transfer unconditionally, but don't clobber the return code if
1989 we already have an error code recorded */
1990 res2 = Curl_posttransfer(data);
1998 Curl_Transfer(struct connectdata *c_conn, /* connection data */
1999 int sockindex, /* socket index to read from or -1 */
2000 curl_off_t size, /* -1 if unknown at this point */
2001 bool getheader, /* TRUE if header parsing is wanted */
2002 curl_off_t *bytecountp, /* return number of bytes read or NULL */
2003 int writesockindex, /* socket index to write to, it may very
2004 well be the same we read from. -1
2006 curl_off_t *writecountp /* return number of bytes written or
2010 struct connectdata *conn = (struct connectdata *)c_conn;
2012 return CURLE_BAD_FUNCTION_ARGUMENT;
2014 curlassert((sockindex <= 1) && (sockindex >= -1));
2016 /* now copy all input parameters */
2017 conn->sockfd = sockindex==-1?
2018 CURL_SOCKET_BAD:conn->sock[sockindex];
2020 conn->bits.getheader = getheader;
2021 conn->bytecountp = bytecountp;
2022 conn->writesockfd = writesockindex==-1?
2023 CURL_SOCKET_BAD:conn->sock[writesockindex];
2024 conn->writebytecountp = writecountp;