VirtualBox

source: vbox/trunk/src/libs/curl-7.64.0/lib/transfer.c@ 86264

Last change on this file since 86264 was 85671, checked in by vboxsync, 4 years ago

Export out internal curl copy to make it a lot simpler to build VBox (OSE) on Windows. bugref:9814

  • Property svn:eol-style set to native
File size: 64.1 KB
Line 
1/***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2019, Daniel Stenberg, <[email protected]>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.haxx.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 ***************************************************************************/
22
23#include "curl_setup.h"
24#include "strtoofft.h"
25
26#ifdef HAVE_NETINET_IN_H
27#include <netinet/in.h>
28#endif
29#ifdef HAVE_NETDB_H
30#include <netdb.h>
31#endif
32#ifdef HAVE_ARPA_INET_H
33#include <arpa/inet.h>
34#endif
35#ifdef HAVE_NET_IF_H
36#include <net/if.h>
37#endif
38#ifdef HAVE_SYS_IOCTL_H
39#include <sys/ioctl.h>
40#endif
41#ifdef HAVE_SIGNAL_H
42#include <signal.h>
43#endif
44
45#ifdef HAVE_SYS_PARAM_H
46#include <sys/param.h>
47#endif
48
49#ifdef HAVE_SYS_SELECT_H
50#include <sys/select.h>
51#endif
52
53#ifndef HAVE_SOCKET
54#error "We can't compile without socket() support!"
55#endif
56
57#include "urldata.h"
58#include <curl/curl.h>
59#include "netrc.h"
60
61#include "content_encoding.h"
62#include "hostip.h"
63#include "transfer.h"
64#include "sendf.h"
65#include "speedcheck.h"
66#include "progress.h"
67#include "http.h"
68#include "url.h"
69#include "getinfo.h"
70#include "vtls/vtls.h"
71#include "select.h"
72#include "multiif.h"
73#include "connect.h"
74#include "non-ascii.h"
75#include "http2.h"
76#include "mime.h"
77#include "strcase.h"
78#include "urlapi-int.h"
79
80/* The last 3 #include files should be in this order */
81#include "curl_printf.h"
82#include "curl_memory.h"
83#include "memdebug.h"
84
85#if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
86 !defined(CURL_DISABLE_IMAP)
87/*
88 * checkheaders() checks the linked list of custom headers for a
89 * particular header (prefix). Provide the prefix without colon!
90 *
91 * Returns a pointer to the first matching header or NULL if none matched.
92 */
93char *Curl_checkheaders(const struct connectdata *conn,
94 const char *thisheader)
95{
96 struct curl_slist *head;
97 size_t thislen = strlen(thisheader);
98 struct Curl_easy *data = conn->data;
99
100 for(head = data->set.headers; head; head = head->next) {
101 if(strncasecompare(head->data, thisheader, thislen) &&
102 Curl_headersep(head->data[thislen]) )
103 return head->data;
104 }
105
106 return NULL;
107}
108#endif
109
110CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
111{
112 if(!data->state.ulbuf) {
113 data->state.ulbuf = malloc(data->set.upload_buffer_size);
114 if(!data->state.ulbuf)
115 return CURLE_OUT_OF_MEMORY;
116 }
117 return CURLE_OK;
118}
119
120#ifndef CURL_DISABLE_HTTP
121/*
122 * This function will be called to loop through the trailers buffer
123 * until no more data is available for sending.
124 */
125static size_t Curl_trailers_read(char *buffer, size_t size, size_t nitems,
126 void *raw)
127{
128 struct Curl_easy *data = (struct Curl_easy *)raw;
129 Curl_send_buffer *trailers_buf = data->state.trailers_buf;
130 size_t bytes_left = trailers_buf->size_used-data->state.trailers_bytes_sent;
131 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
132 if(to_copy) {
133 memcpy(buffer,
134 &trailers_buf->buffer[data->state.trailers_bytes_sent],
135 to_copy);
136 data->state.trailers_bytes_sent += to_copy;
137 }
138 return to_copy;
139}
140
141static size_t Curl_trailers_left(void *raw)
142{
143 struct Curl_easy *data = (struct Curl_easy *)raw;
144 Curl_send_buffer *trailers_buf = data->state.trailers_buf;
145 return trailers_buf->size_used - data->state.trailers_bytes_sent;
146}
147#endif
148
149/*
150 * This function will call the read callback to fill our buffer with data
151 * to upload.
152 */
153CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
154 size_t *nreadp)
155{
156 struct Curl_easy *data = conn->data;
157 size_t buffersize = bytes;
158 size_t nread;
159
160#ifndef CURL_DISABLE_HTTP
161 struct curl_slist *trailers = NULL;
162 CURLcode c;
163 int trailers_ret_code;
164#endif
165
166 curl_read_callback readfunc = NULL;
167 void *extra_data = NULL;
168 bool added_crlf = FALSE;
169
170#ifdef CURL_DOES_CONVERSIONS
171 bool sending_http_headers = FALSE;
172
173 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
174 const struct HTTP *http = data->req.protop;
175
176 if(http->sending == HTTPSEND_REQUEST)
177 /* We're sending the HTTP request headers, not the data.
178 Remember that so we don't re-translate them into garbage. */
179 sending_http_headers = TRUE;
180 }
181#endif
182
183#ifndef CURL_DISABLE_HTTP
184 if(data->state.trailers_state == TRAILERS_INITIALIZED) {
185 /* at this point we already verified that the callback exists
186 so we compile and store the trailers buffer, then proceed */
187 infof(data,
188 "Moving trailers state machine from initialized to sending.\n");
189 data->state.trailers_state = TRAILERS_SENDING;
190 data->state.trailers_buf = Curl_add_buffer_init();
191 if(!data->state.trailers_buf) {
192 failf(data, "Unable to allocate trailing headers buffer !");
193 return CURLE_OUT_OF_MEMORY;
194 }
195 data->state.trailers_bytes_sent = 0;
196 Curl_set_in_callback(data, true);
197 trailers_ret_code = data->set.trailer_callback(&trailers,
198 data->set.trailer_data);
199 Curl_set_in_callback(data, false);
200 if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
201 c = Curl_http_compile_trailers(trailers, data->state.trailers_buf, data);
202 }
203 else {
204 failf(data, "operation aborted by trailing headers callback");
205 *nreadp = 0;
206 c = CURLE_ABORTED_BY_CALLBACK;
207 }
208 if(c != CURLE_OK) {
209 Curl_add_buffer_free(&data->state.trailers_buf);
210 curl_slist_free_all(trailers);
211 return c;
212 }
213 infof(data, "Successfully compiled trailers.\r\n");
214 curl_slist_free_all(trailers);
215 }
216#endif
217
218 /* if we are transmitting trailing data, we don't need to write
219 a chunk size so we skip this */
220 if(data->req.upload_chunky &&
221 data->state.trailers_state == TRAILERS_NONE) {
222 /* if chunked Transfer-Encoding */
223 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
224 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
225 }
226
227#ifndef CURL_DISABLE_HTTP
228 if(data->state.trailers_state == TRAILERS_SENDING) {
229 /* if we're here then that means that we already sent the last empty chunk
230 but we didn't send a final CR LF, so we sent 0 CR LF. We then start
231 pulling trailing data until we ²have no more at which point we
232 simply return to the previous point in the state machine as if
233 nothing happened.
234 */
235 readfunc = Curl_trailers_read;
236 extra_data = (void *)data;
237 }
238 else
239#endif
240 {
241 readfunc = data->state.fread_func;
242 extra_data = data->state.in;
243 }
244
245 Curl_set_in_callback(data, true);
246 nread = readfunc(data->req.upload_fromhere, 1,
247 buffersize, extra_data);
248 Curl_set_in_callback(data, false);
249
250 if(nread == CURL_READFUNC_ABORT) {
251 failf(data, "operation aborted by callback");
252 *nreadp = 0;
253 return CURLE_ABORTED_BY_CALLBACK;
254 }
255 if(nread == CURL_READFUNC_PAUSE) {
256 struct SingleRequest *k = &data->req;
257
258 if(conn->handler->flags & PROTOPT_NONETWORK) {
259 /* protocols that work without network cannot be paused. This is
260 actually only FILE:// just now, and it can't pause since the transfer
261 isn't done using the "normal" procedure. */
262 failf(data, "Read callback asked for PAUSE when not supported!");
263 return CURLE_READ_ERROR;
264 }
265
266 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
267 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
268 if(data->req.upload_chunky) {
269 /* Back out the preallocation done above */
270 data->req.upload_fromhere -= (8 + 2);
271 }
272 *nreadp = 0;
273
274 return CURLE_OK; /* nothing was read */
275 }
276 else if(nread > buffersize) {
277 /* the read function returned a too large value */
278 *nreadp = 0;
279 failf(data, "read function returned funny value");
280 return CURLE_READ_ERROR;
281 }
282
283 if(!data->req.forbidchunk && data->req.upload_chunky) {
284 /* if chunked Transfer-Encoding
285 * build chunk:
286 *
287 * <HEX SIZE> CRLF
288 * <DATA> CRLF
289 */
290 /* On non-ASCII platforms the <DATA> may or may not be
291 translated based on set.prefer_ascii while the protocol
292 portion must always be translated to the network encoding.
293 To further complicate matters, line end conversion might be
294 done later on, so we need to prevent CRLFs from becoming
295 CRCRLFs if that's the case. To do this we use bare LFs
296 here, knowing they'll become CRLFs later on.
297 */
298
299 char hexbuffer[11];
300 const char *endofline_native;
301 const char *endofline_network;
302 int hexlen = 0;
303
304 if(
305#ifdef CURL_DO_LINEEND_CONV
306 (data->set.prefer_ascii) ||
307#endif
308 (data->set.crlf)) {
309 /* \n will become \r\n later on */
310 endofline_native = "\n";
311 endofline_network = "\x0a";
312 }
313 else {
314 endofline_native = "\r\n";
315 endofline_network = "\x0d\x0a";
316 }
317
318 /* if we're not handling trailing data, proceed as usual */
319 if(data->state.trailers_state != TRAILERS_SENDING) {
320 hexlen = msnprintf(hexbuffer, sizeof(hexbuffer),
321 "%zx%s", nread, endofline_native);
322
323 /* move buffer pointer */
324 data->req.upload_fromhere -= hexlen;
325 nread += hexlen;
326
327 /* copy the prefix to the buffer, leaving out the NUL */
328 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
329
330 /* always append ASCII CRLF to the data unless
331 we have a valid trailer callback */
332#ifndef CURL_DISABLE_HTTP
333 if((nread-hexlen) == 0 &&
334 data->set.trailer_callback != NULL &&
335 data->state.trailers_state == TRAILERS_NONE) {
336 data->state.trailers_state = TRAILERS_INITIALIZED;
337 }
338 else
339#endif
340 {
341 memcpy(data->req.upload_fromhere + nread,
342 endofline_network,
343 strlen(endofline_network));
344 added_crlf = TRUE;
345 }
346 }
347
348#ifdef CURL_DOES_CONVERSIONS
349 {
350 CURLcode result;
351 size_t length;
352 if(data->set.prefer_ascii)
353 /* translate the protocol and data */
354 length = nread;
355 else
356 /* just translate the protocol portion */
357 length = strlen(hexbuffer);
358 result = Curl_convert_to_network(data, data->req.upload_fromhere,
359 length);
360 /* Curl_convert_to_network calls failf if unsuccessful */
361 if(result)
362 return result;
363 }
364#endif /* CURL_DOES_CONVERSIONS */
365
366#ifndef CURL_DISABLE_HTTP
367 if(data->state.trailers_state == TRAILERS_SENDING &&
368 !Curl_trailers_left(data)) {
369 Curl_add_buffer_free(&data->state.trailers_buf);
370 data->state.trailers_state = TRAILERS_DONE;
371 data->set.trailer_data = NULL;
372 data->set.trailer_callback = NULL;
373 /* mark the transfer as done */
374 data->req.upload_done = TRUE;
375 infof(data, "Signaling end of chunked upload after trailers.\n");
376 }
377 else
378#endif
379 if((nread - hexlen) == 0 &&
380 data->state.trailers_state != TRAILERS_INITIALIZED) {
381 /* mark this as done once this chunk is transferred */
382 data->req.upload_done = TRUE;
383 infof(data,
384 "Signaling end of chunked upload via terminating chunk.\n");
385 }
386
387 if(added_crlf)
388 nread += strlen(endofline_network); /* for the added end of line */
389 }
390#ifdef CURL_DOES_CONVERSIONS
391 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
392 CURLcode result;
393 result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
394 /* Curl_convert_to_network calls failf if unsuccessful */
395 if(result)
396 return result;
397 }
398#endif /* CURL_DOES_CONVERSIONS */
399
400 *nreadp = nread;
401
402 return CURLE_OK;
403}
404
405
406/*
407 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
408 * POST/PUT with multi-pass authentication when a sending was denied and a
409 * resend is necessary.
410 */
411CURLcode Curl_readrewind(struct connectdata *conn)
412{
413 struct Curl_easy *data = conn->data;
414 curl_mimepart *mimepart = &data->set.mimepost;
415
416 conn->bits.rewindaftersend = FALSE; /* we rewind now */
417
418 /* explicitly switch off sending data on this connection now since we are
419 about to restart a new transfer and thus we want to avoid inadvertently
420 sending more data on the existing connection until the next transfer
421 starts */
422 data->req.keepon &= ~KEEP_SEND;
423
424 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
425 CURLOPT_HTTPPOST, call app to rewind
426 */
427 if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
428 struct HTTP *http = data->req.protop;
429
430 if(http->sendit)
431 mimepart = http->sendit;
432 }
433 if(data->set.postfields)
434 ; /* do nothing */
435 else if(data->set.httpreq == HTTPREQ_POST_MIME ||
436 data->set.httpreq == HTTPREQ_POST_FORM) {
437 if(Curl_mime_rewind(mimepart)) {
438 failf(data, "Cannot rewind mime/post data");
439 return CURLE_SEND_FAIL_REWIND;
440 }
441 }
442 else {
443 if(data->set.seek_func) {
444 int err;
445
446 Curl_set_in_callback(data, true);
447 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
448 Curl_set_in_callback(data, false);
449 if(err) {
450 failf(data, "seek callback returned error %d", (int)err);
451 return CURLE_SEND_FAIL_REWIND;
452 }
453 }
454 else if(data->set.ioctl_func) {
455 curlioerr err;
456
457 Curl_set_in_callback(data, true);
458 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
459 data->set.ioctl_client);
460 Curl_set_in_callback(data, false);
461 infof(data, "the ioctl callback returned %d\n", (int)err);
462
463 if(err) {
464 /* FIXME: convert to a human readable error message */
465 failf(data, "ioctl callback returned error %d", (int)err);
466 return CURLE_SEND_FAIL_REWIND;
467 }
468 }
469 else {
470 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
471 given FILE * stream and we can actually attempt to rewind that
472 ourselves with fseek() */
473 if(data->state.fread_func == (curl_read_callback)fread) {
474 if(-1 != fseek(data->state.in, 0, SEEK_SET))
475 /* successful rewind */
476 return CURLE_OK;
477 }
478
479 /* no callback set or failure above, makes us fail at once */
480 failf(data, "necessary data rewind wasn't possible");
481 return CURLE_SEND_FAIL_REWIND;
482 }
483 }
484 return CURLE_OK;
485}
486
487static int data_pending(const struct connectdata *conn)
488{
489 /* in the case of libssh2, we can never be really sure that we have emptied
490 its internal buffers so we MUST always try until we get EAGAIN back */
491 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
492#if defined(USE_NGHTTP2)
493 Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
494 /* For HTTP/2, we may read up everything including response body
495 with header fields in Curl_http_readwrite_headers. If no
496 content-length is provided, curl waits for the connection
497 close, which we emulate it using conn->proto.httpc.closed =
498 TRUE. The thing is if we read everything, then http2_recv won't
499 be called and we cannot signal the HTTP/2 stream has closed. As
500 a workaround, we return nonzero here to call http2_recv. */
501 ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
502#else
503 Curl_ssl_data_pending(conn, FIRSTSOCKET);
504#endif
505}
506
507static void read_rewind(struct connectdata *conn,
508 size_t thismuch)
509{
510 DEBUGASSERT(conn->read_pos >= thismuch);
511
512 conn->read_pos -= thismuch;
513 conn->bits.stream_was_rewound = TRUE;
514
515#ifdef DEBUGBUILD
516 {
517 char buf[512 + 1];
518 size_t show;
519
520 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
521 if(conn->master_buffer) {
522 memcpy(buf, conn->master_buffer + conn->read_pos, show);
523 buf[show] = '\0';
524 }
525 else {
526 buf[0] = '\0';
527 }
528
529 DEBUGF(infof(conn->data,
530 "Buffer after stream rewind (read_pos = %zu): [%s]\n",
531 conn->read_pos, buf));
532 }
533#endif
534}
535
536/*
537 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
538 * remote document with the time provided by CURLOPT_TIMEVAL
539 */
540bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
541{
542 if((timeofdoc == 0) || (data->set.timevalue == 0))
543 return TRUE;
544
545 switch(data->set.timecondition) {
546 case CURL_TIMECOND_IFMODSINCE:
547 default:
548 if(timeofdoc <= data->set.timevalue) {
549 infof(data,
550 "The requested document is not new enough\n");
551 data->info.timecond = TRUE;
552 return FALSE;
553 }
554 break;
555 case CURL_TIMECOND_IFUNMODSINCE:
556 if(timeofdoc >= data->set.timevalue) {
557 infof(data,
558 "The requested document is not old enough\n");
559 data->info.timecond = TRUE;
560 return FALSE;
561 }
562 break;
563 }
564
565 return TRUE;
566}
567
568/*
569 * Go ahead and do a read if we have a readable socket or if
570 * the stream was rewound (in which case we have data in a
571 * buffer)
572 *
573 * return '*comeback' TRUE if we didn't properly drain the socket so this
574 * function should get called again without select() or similar in between!
575 */
576static CURLcode readwrite_data(struct Curl_easy *data,
577 struct connectdata *conn,
578 struct SingleRequest *k,
579 int *didwhat, bool *done,
580 bool *comeback)
581{
582 CURLcode result = CURLE_OK;
583 ssize_t nread; /* number of bytes read */
584 size_t excess = 0; /* excess bytes read */
585 bool readmore = FALSE; /* used by RTP to signal for more data */
586 int maxloops = 100;
587
588 *done = FALSE;
589 *comeback = FALSE;
590
591 /* This is where we loop until we have read everything there is to
592 read or we get a CURLE_AGAIN */
593 do {
594 bool is_empty_data = FALSE;
595 size_t buffersize = data->set.buffer_size;
596 size_t bytestoread = buffersize;
597
598 if(
599#if defined(USE_NGHTTP2)
600 /* For HTTP/2, read data without caring about the content
601 length. This is safe because body in HTTP/2 is always
602 segmented thanks to its framing layer. Meanwhile, we have to
603 call Curl_read to ensure that http2_handle_stream_close is
604 called when we read all incoming bytes for a particular
605 stream. */
606 !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
607 conn->httpversion == 20) &&
608#endif
609 k->size != -1 && !k->header) {
610 /* make sure we don't read "too much" if we can help it since we
611 might be pipelining and then someone else might want to read what
612 follows! */
613 curl_off_t totalleft = k->size - k->bytecount;
614 if(totalleft < (curl_off_t)bytestoread)
615 bytestoread = (size_t)totalleft;
616 }
617
618 if(bytestoread) {
619 /* receive data from the network! */
620 result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
621
622 /* read would've blocked */
623 if(CURLE_AGAIN == result)
624 break; /* get out of loop */
625
626 if(result>0)
627 return result;
628 }
629 else {
630 /* read nothing but since we wanted nothing we consider this an OK
631 situation to proceed from */
632 DEBUGF(infof(data, "readwrite_data: we're done!\n"));
633 nread = 0;
634 }
635
636 if((k->bytecount == 0) && (k->writebytecount == 0)) {
637 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
638 if(k->exp100 > EXP100_SEND_DATA)
639 /* set time stamp to compare with when waiting for the 100 */
640 k->start100 = Curl_now();
641 }
642
643 *didwhat |= KEEP_RECV;
644 /* indicates data of zero size, i.e. empty file */
645 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
646
647 /* NUL terminate, allowing string ops to be used */
648 if(0 < nread || is_empty_data) {
649 k->buf[nread] = 0;
650 }
651 else if(0 >= nread) {
652 /* if we receive 0 or less here, the server closed the connection
653 and we bail out from this! */
654 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
655 k->keepon &= ~KEEP_RECV;
656 break;
657 }
658
659 /* Default buffer to use when we write the buffer, it may be changed
660 in the flow below before the actual storing is done. */
661 k->str = k->buf;
662
663 if(conn->handler->readwrite) {
664 result = conn->handler->readwrite(data, conn, &nread, &readmore);
665 if(result)
666 return result;
667 if(readmore)
668 break;
669 }
670
671#ifndef CURL_DISABLE_HTTP
672 /* Since this is a two-state thing, we check if we are parsing
673 headers at the moment or not. */
674 if(k->header) {
675 /* we are in parse-the-header-mode */
676 bool stop_reading = FALSE;
677 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
678 if(result)
679 return result;
680
681 if(conn->handler->readwrite &&
682 (k->maxdownload <= 0 && nread > 0)) {
683 result = conn->handler->readwrite(data, conn, &nread, &readmore);
684 if(result)
685 return result;
686 if(readmore)
687 break;
688 }
689
690 if(stop_reading) {
691 /* We've stopped dealing with input, get out of the do-while loop */
692
693 if(nread > 0) {
694 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
695 infof(data,
696 "Rewinding stream by : %zd"
697 " bytes on url %s (zero-length body)\n",
698 nread, data->state.up.path);
699 read_rewind(conn, (size_t)nread);
700 }
701 else {
702 infof(data,
703 "Excess found in a non pipelined read:"
704 " excess = %zd"
705 " url = %s (zero-length body)\n",
706 nread, data->state.up.path);
707 }
708 }
709
710 break;
711 }
712 }
713#endif /* CURL_DISABLE_HTTP */
714
715
716 /* This is not an 'else if' since it may be a rest from the header
717 parsing, where the beginning of the buffer is headers and the end
718 is non-headers. */
719 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
720
721 if(data->set.opt_no_body) {
722 /* data arrives although we want none, bail out */
723 streamclose(conn, "ignoring body");
724 *done = TRUE;
725 return CURLE_WEIRD_SERVER_REPLY;
726 }
727
728#ifndef CURL_DISABLE_HTTP
729 if(0 == k->bodywrites && !is_empty_data) {
730 /* These checks are only made the first time we are about to
731 write a piece of the body */
732 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
733 /* HTTP-only checks */
734
735 if(data->req.newurl) {
736 if(conn->bits.close) {
737 /* Abort after the headers if "follow Location" is set
738 and we're set to close anyway. */
739 k->keepon &= ~KEEP_RECV;
740 *done = TRUE;
741 return CURLE_OK;
742 }
743 /* We have a new url to load, but since we want to be able
744 to re-use this connection properly, we read the full
745 response in "ignore more" */
746 k->ignorebody = TRUE;
747 infof(data, "Ignoring the response-body\n");
748 }
749 if(data->state.resume_from && !k->content_range &&
750 (data->set.httpreq == HTTPREQ_GET) &&
751 !k->ignorebody) {
752
753 if(k->size == data->state.resume_from) {
754 /* The resume point is at the end of file, consider this fine
755 even if it doesn't allow resume from here. */
756 infof(data, "The entire document is already downloaded");
757 connclose(conn, "already downloaded");
758 /* Abort download */
759 k->keepon &= ~KEEP_RECV;
760 *done = TRUE;
761 return CURLE_OK;
762 }
763
764 /* we wanted to resume a download, although the server doesn't
765 * seem to support this and we did this with a GET (if it
766 * wasn't a GET we did a POST or PUT resume) */
767 failf(data, "HTTP server doesn't seem to support "
768 "byte ranges. Cannot resume.");
769 return CURLE_RANGE_ERROR;
770 }
771
772 if(data->set.timecondition && !data->state.range) {
773 /* A time condition has been set AND no ranges have been
774 requested. This seems to be what chapter 13.3.4 of
775 RFC 2616 defines to be the correct action for a
776 HTTP/1.1 client */
777
778 if(!Curl_meets_timecondition(data, k->timeofdoc)) {
779 *done = TRUE;
780 /* We're simulating a http 304 from server so we return
781 what should have been returned from the server */
782 data->info.httpcode = 304;
783 infof(data, "Simulate a HTTP 304 response!\n");
784 /* we abort the transfer before it is completed == we ruin the
785 re-use ability. Close the connection */
786 connclose(conn, "Simulated 304 handling");
787 return CURLE_OK;
788 }
789 } /* we have a time condition */
790
791 } /* this is HTTP or RTSP */
792 } /* this is the first time we write a body part */
793#endif /* CURL_DISABLE_HTTP */
794
795 k->bodywrites++;
796
797 /* pass data to the debug function before it gets "dechunked" */
798 if(data->set.verbose) {
799 if(k->badheader) {
800 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
801 (size_t)k->hbuflen);
802 if(k->badheader == HEADER_PARTHEADER)
803 Curl_debug(data, CURLINFO_DATA_IN,
804 k->str, (size_t)nread);
805 }
806 else
807 Curl_debug(data, CURLINFO_DATA_IN,
808 k->str, (size_t)nread);
809 }
810
811#ifndef CURL_DISABLE_HTTP
812 if(k->chunk) {
813 /*
814 * Here comes a chunked transfer flying and we need to decode this
815 * properly. While the name says read, this function both reads
816 * and writes away the data. The returned 'nread' holds the number
817 * of actual data it wrote to the client.
818 */
819
820 CHUNKcode res =
821 Curl_httpchunk_read(conn, k->str, nread, &nread);
822
823 if(CHUNKE_OK < res) {
824 if(CHUNKE_WRITE_ERROR == res) {
825 failf(data, "Failed writing data");
826 return CURLE_WRITE_ERROR;
827 }
828 failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
829 return CURLE_RECV_ERROR;
830 }
831 if(CHUNKE_STOP == res) {
832 size_t dataleft;
833 /* we're done reading chunks! */
834 k->keepon &= ~KEEP_RECV; /* read no more */
835
836 /* There are now possibly N number of bytes at the end of the
837 str buffer that weren't written to the client.
838
839 We DO care about this data if we are pipelining.
840 Push it back to be read on the next pass. */
841
842 dataleft = conn->chunk.dataleft;
843 if(dataleft != 0) {
844 infof(conn->data, "Leftovers after chunking: %zu bytes\n",
845 dataleft);
846 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
847 /* only attempt the rewind if we truly are pipelining */
848 infof(conn->data, "Rewinding %zu bytes\n",dataleft);
849 read_rewind(conn, dataleft);
850 }
851 }
852 }
853 /* If it returned OK, we just keep going */
854 }
855#endif /* CURL_DISABLE_HTTP */
856
857 /* Account for body content stored in the header buffer */
858 if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
859 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
860 k->hbuflen));
861 k->bytecount += k->hbuflen;
862 }
863
864 if((-1 != k->maxdownload) &&
865 (k->bytecount + nread >= k->maxdownload)) {
866
867 excess = (size_t)(k->bytecount + nread - k->maxdownload);
868 if(excess > 0 && !k->ignorebody) {
869 if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
870 infof(data,
871 "Rewinding stream by : %zu"
872 " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
873 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
874 ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
875 excess, data->state.up.path,
876 k->size, k->maxdownload, k->bytecount, nread);
877 read_rewind(conn, excess);
878 }
879 else {
880 infof(data,
881 "Excess found in a non pipelined read:"
882 " excess = %zu"
883 ", size = %" CURL_FORMAT_CURL_OFF_T
884 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
885 ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
886 excess, k->size, k->maxdownload, k->bytecount);
887 }
888 }
889
890 nread = (ssize_t) (k->maxdownload - k->bytecount);
891 if(nread < 0) /* this should be unusual */
892 nread = 0;
893
894 k->keepon &= ~KEEP_RECV; /* we're done reading */
895 }
896
897 k->bytecount += nread;
898
899 Curl_pgrsSetDownloadCounter(data, k->bytecount);
900
901 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
902 /* If this is chunky transfer, it was already written */
903
904 if(k->badheader && !k->ignorebody) {
905 /* we parsed a piece of data wrongly assuming it was a header
906 and now we output it as body instead */
907
908 /* Don't let excess data pollute body writes */
909 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
910 result = Curl_client_write(conn, CLIENTWRITE_BODY,
911 data->state.headerbuff,
912 k->hbuflen);
913 else
914 result = Curl_client_write(conn, CLIENTWRITE_BODY,
915 data->state.headerbuff,
916 (size_t)k->maxdownload);
917
918 if(result)
919 return result;
920 }
921 if(k->badheader < HEADER_ALLBAD) {
922 /* This switch handles various content encodings. If there's an
923 error here, be sure to check over the almost identical code
924 in http_chunks.c.
925 Make sure that ALL_CONTENT_ENCODINGS contains all the
926 encodings handled here. */
927 if(conn->data->set.http_ce_skip || !k->writer_stack) {
928 if(!k->ignorebody) {
929#ifndef CURL_DISABLE_POP3
930 if(conn->handler->protocol & PROTO_FAMILY_POP3)
931 result = Curl_pop3_write(conn, k->str, nread);
932 else
933#endif /* CURL_DISABLE_POP3 */
934 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
935 nread);
936 }
937 }
938 else if(!k->ignorebody)
939 result = Curl_unencode_write(conn, k->writer_stack, k->str, nread);
940 }
941 k->badheader = HEADER_NORMAL; /* taken care of now */
942
943 if(result)
944 return result;
945 }
946
947 } /* if(!header and data to read) */
948
949 if(conn->handler->readwrite && excess && !conn->bits.stream_was_rewound) {
950 /* Parse the excess data */
951 k->str += nread;
952
953 if(&k->str[excess] > &k->buf[data->set.buffer_size]) {
954 /* the excess amount was too excessive(!), make sure
955 it doesn't read out of buffer */
956 excess = &k->buf[data->set.buffer_size] - k->str;
957 }
958 nread = (ssize_t)excess;
959
960 result = conn->handler->readwrite(data, conn, &nread, &readmore);
961 if(result)
962 return result;
963
964 if(readmore)
965 k->keepon |= KEEP_RECV; /* we're not done reading */
966 break;
967 }
968
969 if(is_empty_data) {
970 /* if we received nothing, the server closed the connection and we
971 are done */
972 k->keepon &= ~KEEP_RECV;
973 }
974
975 if(k->keepon & KEEP_RECV_PAUSE) {
976 /* this is a paused transfer */
977 break;
978 }
979
980 } while(data_pending(conn) && maxloops--);
981
982 if(maxloops <= 0) {
983 /* we mark it as read-again-please */
984 conn->cselect_bits = CURL_CSELECT_IN;
985 *comeback = TRUE;
986 }
987
988 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
989 conn->bits.close) {
990 /* When we've read the entire thing and the close bit is set, the server
991 may now close the connection. If there's now any kind of sending going
992 on from our side, we need to stop that immediately. */
993 infof(data, "we are done reading and this is set to close, stop send\n");
994 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
995 }
996
997 return CURLE_OK;
998}
999
1000static CURLcode done_sending(struct connectdata *conn,
1001 struct SingleRequest *k)
1002{
1003 k->keepon &= ~KEEP_SEND; /* we're done writing */
1004
1005 Curl_http2_done_sending(conn);
1006
1007 if(conn->bits.rewindaftersend) {
1008 CURLcode result = Curl_readrewind(conn);
1009 if(result)
1010 return result;
1011 }
1012 return CURLE_OK;
1013}
1014
#if defined(WIN32) && !defined(USE_LWIPSOCK)
#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
#endif

/*
 * Ask winsock for the ideal send backlog size of the socket and, when the
 * query succeeds, grow the socket's send buffer to match it.
 */
static void win_update_buffer_size(curl_socket_t sockfd)
{
  ULONG ideal;
  DWORD ideallen;
  int err = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
                     &ideal, sizeof(ideal), &ideallen, 0, 0);
  if(!err)
    setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
               (const char *)&ideal, sizeof(ideal));
}
#else
/* no-op on non-Windows (and lwIP) builds */
#define win_update_buffer_size(x)
#endif
1035
/*
 * Send data to upload to the server, when the socket is writable.
 *
 * Fills the upload buffer via the read callback (unless a partial buffer
 * from a previous round is still pending), optionally converts LF to CRLF
 * and dot-escapes SMTP data, then writes as much as the socket accepts.
 * ORs KEEP_SEND into '*didwhat' when a send was attempted. Returns CURLE_OK
 * or an error code.
 */
static CURLcode readwrite_upload(struct Curl_easy *data,
                                 struct connectdata *conn,
                                 int *didwhat)
{
  ssize_t i, si;
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */
  bool sending_http_headers = FALSE;
  struct SingleRequest *k = &data->req;

  /* nothing counted in either direction yet: this is the first transfer
     activity, so stamp the start-transfer time */
  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_SEND;

  do {
    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == k->upload_present) {
      result = Curl_get_upload_buffer(data);
      if(result)
        return result;
      /* init the "upload from here" pointer */
      k->upload_fromhere = data->state.ulbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        size_t fillcount;
        struct HTTP *http = k->protop;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_SEND; /* disable writing */
          k->start100 = Curl_now(); /* timeout count starts now */
          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
          /* set a timeout for the multi interface */
          Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
          break;
        }

        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          if(http->sending == HTTPSEND_REQUEST)
            /* We're sending the HTTP request headers, not the data.
               Remember that so we don't change the line endings. */
            sending_http_headers = TRUE;
          else
            sending_http_headers = FALSE;
        }

        /* pull up to one upload buffer's worth of data from the read
           callback; fillcount gets the number of bytes delivered */
        result = Curl_fillreadbuffer(conn, data->set.upload_buffer_size,
                                     &fillcount);
        if(result)
          return result;

        nread = fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      if(nread <= 0) {
        /* read callback delivered nothing (or signalled done): finish the
           sending half of the transfer */
        result = done_sending(conn, k);
        if(result)
          return result;
        break;
      }

      /* store number of bytes available for upload */
      k->upload_present = nread;

      /* convert LF to CRLF if so asked */
      if((!sending_http_headers) && (
#ifdef CURL_DO_LINEEND_CONV
         /* always convert if we're FTPing in ASCII mode */
         (data->set.prefer_ascii) ||
#endif
         (data->set.crlf))) {
        /* Do we need to allocate a scratch buffer? */
        if(!data->state.scratch) {
          /* worst case doubles the size: every input byte is an LF */
          data->state.scratch = malloc(2 * data->set.upload_buffer_size);
          if(!data->state.scratch) {
            failf(data, "Failed to alloc scratch buffer!");

            return CURLE_OUT_OF_MEMORY;
          }
        }

        /*
         * ASCII/EBCDIC Note: This is presumably a text (not binary)
         * transfer so the data should already be in ASCII.
         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
         * must be used instead of the escape sequences \r & \n.
         */
        for(i = 0, si = 0; i < nread; i++, si++) {
          if(k->upload_fromhere[i] == 0x0a) {
            data->state.scratch[si++] = 0x0d;
            data->state.scratch[si] = 0x0a;
            if(!data->set.crlf) {
              /* we're here only because FTP is in ASCII mode...
                 bump infilesize for the LF we just added */
              if(data->state.infilesize != -1)
                data->state.infilesize++;
            }
          }
          else
            data->state.scratch[si] = k->upload_fromhere[i];
        }

        if(si != nread) {
          /* only perform the special operation if we really did replace
             anything */
          nread = si;

          /* upload from the new (replaced) buffer instead */
          k->upload_fromhere = data->state.scratch;

          /* set the new amount too */
          k->upload_present = nread;
        }
      }

#ifndef CURL_DISABLE_SMTP
      if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
        /* dot-stuff lines starting with '.' per the SMTP protocol */
        result = Curl_smtp_escape_eob(conn, nread);
        if(result)
          return result;
      }
#endif /* CURL_DISABLE_SMTP */
    } /* if 0 == k->upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
         that instead of reading more data */
    }

    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd, /* socket to send to */
                        k->upload_fromhere, /* buffer pointer */
                        k->upload_present, /* buffer size */
                        &bytes_written); /* actually sent */
    if(result)
      return result;

    win_update_buffer_size(conn->writesockfd);

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
                 (size_t)bytes_written);

    k->writebytecount += bytes_written;

    if((!k->upload_chunky || k->forbidchunk) &&
       (k->writebytecount == data->state.infilesize)) {
      /* we have sent all data we were supposed to */
      k->upload_done = TRUE;
      infof(data, "We are completely uploaded and fine\n");
    }

    if(k->upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */

      /* store the amount of bytes left in the buffer to write */
      k->upload_present -= bytes_written;

      /* advance the pointer where to find the buffer when the next send
         is to happen */
      k->upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      result = Curl_get_upload_buffer(data);
      if(result)
        return result;
      k->upload_fromhere = data->state.ulbuf;
      k->upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        result = done_sending(conn, k);
        if(result)
          return result;
      }
    }

    Curl_pgrsSetUploadCounter(data, k->writebytecount);

  } WHILE_FALSE; /* just to break out from! */

  return CURLE_OK;
}
1238
/*
 * Curl_readwrite() is the low-level function to be called when data is to
 * be read and written to/from the connection.
 *
 * Checks socket readiness (reusing conn->cselect_bits when already known),
 * dispatches to readwrite_data()/readwrite_upload(), handles the Expect:
 * 100-continue timeout, speed checks and the overall operation timeout.
 *
 * return '*comeback' TRUE if we didn't properly drain the socket so this
 * function should get called again without select() or similar in between!
 * '*done' is set TRUE once nothing remains to send or receive.
 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        struct Curl_easy *data,
                        bool *done,
                        bool *comeback)
{
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat = 0;

  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits;

  conn->cselect_bits = 0; /* consume the stored select state */

  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
    fd_read = conn->sockfd;
  else
    fd_read = CURL_SOCKET_BAD;

  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

  if(conn->data->state.drain) {
    /* buffered data is pending; pretend the socket is readable */
    select_res |= CURL_CSELECT_IN;
    DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
  }

  if(!select_res) /* Call for select()/poll() only, if read/write/error
                     status is not known. */
    select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);

  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }

  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {

    result = readwrite_data(data, conn, k, &didwhat, done, comeback);
    if(result || *done)
      return result;
  }

  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    /* write */

    result = readwrite_upload(data, conn, &didwhat);
    if(result)
      return result;
  }

  k->now = Curl_now();
  if(didwhat) {
    /* Update read/write counters */
    if(k->bytecountp)
      *k->bytecountp = k->bytecount; /* read count */
    if(k->writebytecountp)
      *k->writebytecountp = k->writebytecount; /* write count */
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */

      timediff_t ms = Curl_timediff(k->now, k->start100);
      if(ms >= data->set.expect_100_timeout) {
        /* we've waited long enough, continue anyway */
        k->exp100 = EXP100_SEND_DATA;
        k->keepon |= KEEP_SEND;
        Curl_expire_done(data, EXPIRE_100_TIMEOUT);
        infof(data, "Done waiting for 100-continue\n");
      }
    }
  }

  /* progress callback may abort; otherwise enforce the low-speed limits */
  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);
  if(result)
    return result;

  if(k->keepon) {
    /* transfer still in progress: check the overall operation timeout */
    if(0 > Curl_timeleft(data, &k->now, FALSE)) {
      if(k->size != -1) {
        failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
              " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
              CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_timediff(k->now, data->progress.t_startsingle),
              k->bytecount, k->size);
      }
      else {
        failf(data, "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
              " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_timediff(k->now, data->progress.t_startsingle),
              k->bytecount);
      }
      return CURLE_OPERATION_TIMEDOUT;
    }
  }
  else {
    /*
     * The transfer has been performed. Just make some general checks before
     * returning.
     */

    if(!(data->set.opt_no_body) && (k->size != -1) &&
       (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
       /* Most FTP servers don't adjust their file SIZE response for CRLFs,
          so we'll check to see if the discrepancy can be explained
          by the number of CRLFs we've changed to LFs.
       */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
       !k->newurl) {
      failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
            " bytes remaining to read", k->size - k->bytecount);
      return CURLE_PARTIAL_FILE;
    }
    if(!(data->set.opt_no_body) && k->chunk &&
       (conn->chunk.state != CHUNK_STOP)) {
      /*
       * In chunked mode, return an error if the connection is closed prior to
       * the empty (terminating) chunk is read.
       *
       * The condition above used to check for
       * conn->proto.http->chunk.datasize != 0 which is true after reading
       * *any* chunk, not just the empty chunk.
       *
       */
      failf(data, "transfer closed with outstanding read data remaining");
      return CURLE_PARTIAL_FILE;
    }
    if(Curl_pgrsUpdate(conn))
      return CURLE_ABORTED_BY_CALLBACK;
  }

  /* Now update the "done" boolean we return */
  *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
                            KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;

  return CURLE_OK;
}
1412
/*
 * Curl_single_getsock() gets called by the multi interface code when the app
 * has requested to get the sockets for the current connection. This function
 * will then be called once for every connection that the multi interface
 * keeps track of. This function will only be called for connections that are
 * in the proper state to have this information available.
 *
 * Fills 'sock' with up to two sockets (read and/or write) and returns a
 * GETSOCK_* bitmap describing which slots are used and for what.
 */
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *sock, /* points to numsocks number
                                                of sockets */
                        int numsocks)
{
  const struct Curl_easy *data = conn->data;
  int bitmap = GETSOCK_BLANK;
  unsigned sockindex = 0;

  /* protocol handlers may supply their own socket reporting */
  if(conn->handler->perform_getsock)
    return conn->handler->perform_getsock(conn, sock, numsocks);

  if(numsocks < 2)
    /* simple check but we might need two slots */
    return GETSOCK_BLANK;

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {

    DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);

    bitmap |= GETSOCK_READSOCK(sockindex);
    sock[sockindex] = conn->sockfd;
  }

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {

    if((conn->sockfd != conn->writesockfd) ||
       bitmap == GETSOCK_BLANK) {
      /* only if they are not the same socket and we have a readable
         one, we increase index */
      if(bitmap != GETSOCK_BLANK)
        sockindex++; /* increase index if we need two entries */

      DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);

      sock[sockindex] = conn->writesockfd;
    }

    /* when read and write share a socket, this tags the same slot as both
       readable and writable */
    bitmap |= GETSOCK_WRITESOCK(sockindex);
  }

  return bitmap;
}
1465
1466/* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1467 which means this gets called once for each subsequent redirect etc */
1468void Curl_init_CONNECT(struct Curl_easy *data)
1469{
1470 data->state.fread_func = data->set.fread_func_set;
1471 data->state.in = data->set.in_set;
1472}
1473
/*
 * Curl_pretransfer() is called immediately before a transfer starts, and only
 * once for one transfer no matter if it has redirects or do multi-pass
 * authentication etc.
 *
 * Validates/derives the URL, initializes the SSL session cache, resets
 * per-transfer state, loads cookies and resolve overrides, and installs the
 * SIGPIPE ignore handler where applicable.
 */
CURLcode Curl_pretransfer(struct Curl_easy *data)
{
  CURLcode result;

  if(!data->change.url && !data->set.uh) {
    /* we can't do anything without URL */
    failf(data, "No URL set!");
    return CURLE_URL_MALFORMAT;
  }

  /* since the URL may have been redirected in a previous use of this handle */
  if(data->change.url_alloc) {
    /* the already set URL is allocated, free it first! */
    Curl_safefree(data->change.url);
    data->change.url_alloc = FALSE;
  }

  if(!data->change.url && data->set.uh) {
    /* no string URL set, but a URL handle is: extract the full URL from it */
    CURLUcode uc;
    uc = curl_url_get(data->set.uh,
                      CURLUPART_URL, &data->set.str[STRING_SET_URL], 0);
    if(uc) {
      failf(data, "No URL set!");
      return CURLE_URL_MALFORMAT;
    }
  }

  data->change.url = data->set.str[STRING_SET_URL];

  /* Init the SSL session ID cache here. We do it here since we want to do it
     after the *_setopt() calls (that could specify the size of the cache) but
     before any transfer takes place. */
  result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
  if(result)
    return result;

  data->state.wildcardmatch = data->set.wildcard_enabled;
  data->set.followlocation = 0; /* reset the location-follow counter */
  data->state.this_is_a_follow = FALSE; /* reset this */
  data->state.errorbuf = FALSE; /* no error has occurred */
  data->state.httpversion = 0; /* don't assume any particular server version */

  data->state.authproblem = FALSE;
  data->state.authhost.want = data->set.httpauth;
  data->state.authproxy.want = data->set.proxyauth;
  Curl_safefree(data->info.wouldredirect);
  data->info.wouldredirect = NULL;

  /* figure out the expected upload size up front */
  if(data->set.httpreq == HTTPREQ_PUT)
    data->state.infilesize = data->set.filesize;
  else {
    data->state.infilesize = data->set.postfieldsize;
    if(data->set.postfields && (data->state.infilesize == -1))
      data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
  }

  /* If there is a list of cookie files to read, do it now! */
  if(data->change.cookielist)
    Curl_cookie_loadfiles(data);

  /* If there is a list of host pairs to deal with */
  if(data->change.resolve)
    result = Curl_loadhostpairs(data);

  if(!result) {
    /* Allow data->set.use_port to set which port to use. This needs to be
     * disabled for example when we follow Location: headers to URLs using
     * different ports! */
    data->state.allow_port = TRUE;

#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
    /*************************************************************
     * Tell signal handler to ignore SIGPIPE
     *************************************************************/
    if(!data->set.no_signal)
      data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif

    Curl_initinfo(data); /* reset session-specific information "variables" */
    Curl_pgrsResetTransferSizes(data);
    Curl_pgrsStartNow(data);

    /* In case the handle is re-used and an authentication method was picked
       in the session we need to make sure we only use the one(s) we now
       consider to be fine */
    data->state.authhost.picked &= data->state.authhost.want;
    data->state.authproxy.picked &= data->state.authproxy.want;

    if(data->state.wildcardmatch) {
      struct WildcardData *wc = &data->wildcard;
      if(wc->state < CURLWC_INIT) {
        result = Curl_wildcard_init(wc); /* init wildcard structures */
        if(result)
          return CURLE_OUT_OF_MEMORY;
      }
    }
  }

  return result;
}
1579
/*
 * Curl_posttransfer() is called immediately after a transfer ends
 *
 * Restores the SIGPIPE handler that Curl_pretransfer() replaced (when the
 * build relies on signal() rather than MSG_NOSIGNAL). Always returns
 * CURLE_OK.
 */
CURLcode Curl_posttransfer(struct Curl_easy *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
  /* restore the signal handler for SIGPIPE before we get back */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#else
  (void)data; /* unused parameter */
#endif

  return CURLE_OK;
}
1595
/*
 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
 * as given by the remote server and set up the new URL to request.
 *
 * 'type' selects real following (FOLLOW_REDIR) vs. only recording where a
 * redirect would have gone (FOLLOW_FAKE); see transfer.h. Also adjusts the
 * request method per the 301/302/303 rules unless CURLOPT_POSTREDIR says
 * otherwise.
 *
 * This function DOES NOT FREE the given url.
 */
CURLcode Curl_follow(struct Curl_easy *data,
                     char *newurl, /* the Location: string */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE;
  bool reachedmax = FALSE;
  CURLUcode uc;

  if(type == FOLLOW_REDIR) {
    if((data->set.maxredirs != -1) &&
       (data->set.followlocation >= data->set.maxredirs)) {
      /* redirect limit reached: record the would-be target instead */
      reachedmax = TRUE;
      type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
                             to URL */
    }
    else {
      /* mark the next request as a followed location: */
      data->state.this_is_a_follow = TRUE;

      data->set.followlocation++; /* count location-followers */

      if(data->set.http_auto_referer) {
        /* We are asked to automatically set the previous URL as the referer
           when we get the next URL. We pick the ->url field, which may or may
           not be 100% correct */

        if(data->change.referer_alloc) {
          Curl_safefree(data->change.referer);
          data->change.referer_alloc = FALSE;
        }

        data->change.referer = strdup(data->change.url);
        if(!data->change.referer)
          return CURLE_OUT_OF_MEMORY;
        data->change.referer_alloc = TRUE; /* yes, free this later */
      }
    }
  }

  if(Curl_is_absolute_url(newurl, NULL, MAX_SCHEME_LEN))
    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

  DEBUGASSERT(data->state.uh);
  /* let the URL API resolve relative redirects against the current URL */
  uc = curl_url_set(data->state.uh, CURLUPART_URL, newurl,
                    (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME : 0);
  if(uc) {
    if(type != FOLLOW_FAKE)
      return Curl_uc_to_curlcode(uc);

    /* the URL could not be parsed for some reason, but since this is FAKE
       mode, just duplicate the field as-is */
    newurl = strdup(newurl);
    if(!newurl)
      return CURLE_OUT_OF_MEMORY;
  }
  else {

    uc = curl_url_get(data->state.uh, CURLUPART_URL, &newurl, 0);
    if(uc)
      return Curl_uc_to_curlcode(uc);
  }

  if(type == FOLLOW_FAKE) {
    /* we're only figuring out the new url if we would've followed locations
       but now we're done so we can get out! */
    data->info.wouldredirect = newurl;

    if(reachedmax) {
      failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }
    return CURLE_OK;
  }

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc)
    Curl_safefree(data->change.url);

  data->change.url = newurl;
  data->change.url_alloc = TRUE;

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to perform
   * differently based on exactly what return code there was.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * a HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default: /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC7231, section 6.4.2)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request. If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM
        || data->set.httpreq == HTTPREQ_POST_MIME)
       && !(data->set.keep_post & CURL_REDIR_POST_301)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (quote from RFC7231, section 6.4.3)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request. If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM
        || data->set.httpreq == HTTPREQ_POST_MIME)
       && !(data->set.keep_post & CURL_REDIR_POST_302)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable both types of POSTs, unless the user explicitly
       asks for POST after POST */
    if(data->set.httpreq != HTTPREQ_GET
       && !(data->set.keep_post & CURL_REDIR_POST_303)) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy. The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTransferSizes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}
1799
/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.

   A retry is wanted when a reused connection died before delivering any
   data, or when an HTTP/2 stream was refused before anything was received.
   The connection gets marked for close and the read stream is rewound for
   HTTP uploads so the request can be replayed on a fresh connection.

   NOTE: that the *url is malloc()ed. */
CURLcode Curl_retry_request(struct connectdata *conn,
                            char **url)
{
  struct Curl_easy *data = conn->data;
  bool retry = FALSE;
  *url = NULL;

  /* if we're talking upload, we can't do the checks below, unless the protocol
     is HTTP as when uploading over HTTP we will still get a response */
  if(data->set.upload &&
     !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
    return CURLE_OK;

  if((data->req.bytecount + data->req.headerbytecount == 0) &&
     conn->bits.reuse &&
     (!data->set.opt_no_body
      || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
     (data->set.rtspreq != RTSPREQ_RECEIVE))
    /* We got no data, we attempted to re-use a connection. For HTTP this
       can be a retry so we try again regardless if we expected a body.
       For other protocols we only try again only if we expected a body.

       This might happen if the connection was left alive when we were
       done using it before, but that was closed when we wanted to read from
       it again. Bad luck. Retry the same request on a fresh connect! */
    retry = TRUE;
  else if(data->state.refused_stream &&
          (data->req.bytecount + data->req.headerbytecount == 0) ) {
    /* This was sent on a refused stream, safe to rerun. A refused stream
       error can typically only happen on HTTP/2 level if the stream is safe
       to issue again, but the nghttp2 API can deliver the message to other
       streams as well, which is why this adds the check the data counters
       too. */
    infof(conn->data, "REFUSED_STREAM, retrying a fresh connect\n");
    data->state.refused_stream = FALSE; /* clear again */
    retry = TRUE;
  }
  if(retry) {
    infof(conn->data, "Connection died, retrying a fresh connect\n");
    *url = strdup(conn->data->change.url);
    if(!*url)
      return CURLE_OUT_OF_MEMORY;

    connclose(conn, "retry"); /* close this connection */
    conn->bits.retry = TRUE; /* mark this as a connection we're about
                                to retry. Marking it this way should
                                prevent i.e HTTP transfers to return
                                error just because nothing has been
                                transferred! */


    if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
      struct HTTP *http = data->req.protop;
      if(http->writebytecount) {
        /* body data was already sent: rewind so it can be resent in full */
        CURLcode result = Curl_readrewind(conn);
        if(result) {
          /* rewinding failed: release the URL copy so nothing leaks */
          Curl_safefree(*url);
          return result;
        }
      }
    }
  }
  return CURLE_OK;
}
1867
/*
 * Curl_setup_transfer() is called to setup some basic properties for the
 * upcoming transfer.
 *
 * Picks the read/write sockets from the given indexes (forcing them to be
 * the same socket when multiplexing), records the expected download size
 * and byte-counter destinations, and arms the KEEP_RECV/KEEP_SEND bits -
 * deferring the send side while waiting for an HTTP 100-continue.
 */
void
Curl_setup_transfer(
  struct connectdata *conn, /* connection data */
  int sockindex,            /* socket index to read from or -1 */
  curl_off_t size,          /* -1 if unknown at this point */
  bool getheader,           /* TRUE if header parsing is wanted */
  curl_off_t *bytecountp,   /* return number of bytes read or NULL */
  int writesockindex,       /* socket index to write to, it may very well be
                               the same we read from. -1 disables */
  curl_off_t *writecountp   /* return number of bytes written or NULL */
  )
{
  struct Curl_easy *data;
  struct SingleRequest *k;

  DEBUGASSERT(conn != NULL);

  data = conn->data;
  k = &data->req;

  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));

  if(conn->bits.multiplex || conn->httpversion == 20) {
    /* when multiplexing, the read/write sockets need to be the same! */
    conn->sockfd = sockindex == -1 ?
      ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
      conn->sock[sockindex];
    conn->writesockfd = conn->sockfd;
  }
  else {
    conn->sockfd = sockindex == -1 ?
      CURL_SOCKET_BAD : conn->sock[sockindex];
    conn->writesockfd = writesockindex == -1 ?
      CURL_SOCKET_BAD:conn->sock[writesockindex];
  }
  k->getheader = getheader;

  k->size = size;
  k->bytecountp = bytecountp;
  k->writebytecountp = writecountp;

  /* The code sequence below is placed in this function just because all
     necessary input is not always known in do_complete() as this function may
     be called after that */

  if(!k->getheader) {
    k->header = FALSE;
    if(size > 0)
      Curl_pgrsSetDownloadSize(data, size);
  }
  /* we want header and/or body, if neither then don't do this! */
  if(k->getheader || !data->set.opt_no_body) {

    if(sockindex != -1)
      k->keepon |= KEEP_RECV;

    if(writesockindex != -1) {
      struct HTTP *http = data->req.protop;
      /* HTTP 1.1 magic:

         Even if we require a 100-return code before uploading data, we might
         need to write data before that since the REQUEST may not have been
         finished sent off just yet.

         Thus, we must check if the request has been sent before we set the
         state info where we wait for the 100-return code
      */
      if((data->state.expect100header) &&
         (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
         (http->sending == HTTPSEND_BODY)) {
        /* wait with write until we either got 100-continue or a timeout */
        k->exp100 = EXP100_AWAITING_CONTINUE;
        k->start100 = Curl_now();

        /* Set a timeout for the multi interface. Add the inaccuracy margin so
           that we don't fire slightly too early and get denied to run. */
        Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
      }
      else {
        if(data->state.expect100header)
          /* when we've sent off the rest of the headers, we must await a
             100-continue but first finish sending the request */
          k->exp100 = EXP100_SENDING_REQUEST;

        /* enable the write bit when we're not waiting for continue */
        k->keepon |= KEEP_SEND;
      }
    } /* if(writesockindex != -1) */
  } /* if(k->getheader || !data->set.opt_no_body) */

}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette