/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;

static void
wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

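/*
 * Allocate a mid (multiplex id) entry to track an outstanding request.
 * By default the mid is synchronous: its callback simply wakes up the
 * task that issued the request.
 */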
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = smb_buffer->Mid;	/* always LE */
		temp->pid = current->pid;
		temp->command = smb_buffer->Command;
		cFYI(1, "For smb_command %d", temp->command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->midState = MID_REQUEST_ALLOCATED;
	return temp;
}

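/*
 * Release a mid entry: return its response buffer to the appropriate
 * pool and, when CONFIG_CIFS_STATS2 is enabled, log commands that took
 * longer than a second to complete.
 */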
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->midState = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->largeBuf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) &&
		   (midEntry->command != SMB_COM_LOCKING_ANDX)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

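/*
 * Write an SMB request described by an array of kvecs to the server
 * socket, retrying partial and blocked sends.  If only part of the
 * frame could be written, mark the session for reconnect so the server
 * discards the partial SMB.
 */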
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	struct smb_hdr *smb_buffer = iov[0].iov_base;
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb:  total_len %d", total_len);
	dump_smb(smb_buffer, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/* if blocking send we try 3 times, since each can block
			   for 5 seconds. For nonblocking  we have to try more
			   but wait increasing amounts of time allowing time for
			   socket to clear.  The overall time we wait in either
			   case to send on the socket is about 15 seconds.
			   Similarly we wait for 15 seconds for
			   a response from the server in SendReceive[2]
			   for the server to send a response back for
			   most types of requests (except SMB Write
			   past end of file which can be slow, and
			   blocking lock operations). NFS waits slightly longer
			   than CIFS, but this can make it take longer for
			   nonresponsive servers to be detected and 15 seconds
			   is more than enough time for modern networks to
			   send a packet.  In most cases if we fail to send
			   after the retries we will kill the socket and
			   reconnect which may clear the network problem.
			*/
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
				    ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
			total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one.  We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a
	   side effect of this call. */
	smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);

	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

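/*
 * Throttle the number of simultaneous requests: block until the count
 * of requests in flight to this server drops below cifs_max_pending.
 * Async ops (e.g. oplock breaks) are never held up, and blocking lock
 * ops are not counted against the limit.
 */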
static int wait_for_free_request(struct TCP_Server_Info *server,
				 const int long_op)
{
	if (long_op == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		atomic_inc(&server->inFlight);
		return 0;
	}

	spin_lock(&GlobalMid_Lock);
	while (1) {
		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
			spin_unlock(&GlobalMid_Lock);
			cifs_num_waiters_inc(server);
			wait_event(server->request_q,
				   atomic_read(&server->inFlight)
				     < cifs_max_pending);
			cifs_num_waiters_dec(server);
			spin_lock(&GlobalMid_Lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&GlobalMid_Lock);
				return -ENOENT;
			}

			/* can not count locking commands against total
			   as they are allowed to block on server */

			/* update # of requests on the wire to server */
			if (long_op != CIFS_BLOCKING_OP)
				atomic_inc(&server->inFlight);
			spin_unlock(&GlobalMid_Lock);
			break;
		}
	}
	return 0;
}

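/*
 * Allocate a mid for this request and queue it on the server's
 * pending_mid_q.  Fails if the TCP session is exiting or needs to be
 * reconnected; only session setup and negotiate requests are allowed
 * through before the SMB session is fully established.
 */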
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}
	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

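/*
 * Wait (freezable and killable) until the mid leaves the
 * MID_REQUEST_SUBMITTED state, i.e. until a response arrives or the
 * mid is cancelled.
 */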
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}


/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_receive_t *receive,
		mid_callback_t *callback, void *cbdata, bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;

	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mutex_lock(&server->srv_mutex);
	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL) {
		mutex_unlock(&server->srv_mutex);
		atomic_dec(&server->inFlight);
		wake_up(&server->request_q);
		return -ENOMEM;
	}

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out_err;
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->midState = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_sendv(server, iov, nvec);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
	return rc;
}

/*
 * Send an SMB request.  No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		struct smb_hdr *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = (char *)in_buf;
	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

	return rc;
}

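/*
 * Translate the final state of a mid into a return code.  On success
 * (MID_RESPONSE_RECEIVED) the mid is left for the caller to consume;
 * in every other state it is freed here.
 */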
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
		mid->mid, mid->midState);

	spin_lock(&GlobalMid_Lock);
	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
			mid->mid, mid->midState);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
		struct mid_q_entry *mid)
{
	int rc = 0;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4  + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
		in_buf->Mid, rc);

	return rc;
}

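/*
 * Sanity-check a received response: dump the start of the frame, verify
 * the SMB signature when signing is negotiated, and map the SMB status
 * code in the response to a POSIX error.
 */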
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if signing is in use */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		struct kvec iov;

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(&iov, 1, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

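/*
 * Send an SMB request built from an array of kvecs and wait for the
 * response.  On success iov[0] is updated to point at the response
 * buffer and *pRespBufType records whether it came from the large or
 * small buffer pool; the caller owns the response buffer unless
 * CIFS_NO_RESP was set in flags.
 */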
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	struct smb_hdr *in_buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(in_buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(in_buf);
		return -ENOENT;
	}

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(in_buf);
		return rc;
	}

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}
	rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(in_buf);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(in_buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(in_buf);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(in_buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	iov[0].iov_base = (char *)midQ->resp_buf;
	iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
	if (midQ->largeBuf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

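/*
 * Send a single-buffer SMB request and wait for the response, which is
 * copied into out_buf; *pbytes_returned is set to the length of the
 * response, excluding the 4-byte RFC1001 length field.
 */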
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->midState == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			atomic_dec(&ses->server->inFlight);
			wake_up(&ses->server->request_q);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		atomic_dec(&ses->server->inFlight);
		wake_up(&ses->server->request_q);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	atomic_dec(&ses->server->inFlight);
	wake_up(&ses->server->request_q);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = GetNextMid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

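/*
 * Like SendReceive, but for blocking lock requests: the wait for the
 * response may be interrupted by a signal, in which case we cancel the
 * lock on the server (via NT_CANCEL for a POSIX lock or a
 * LOCKING_ANDX_CANCEL_LOCK for a Windows lock) and then restart the
 * system call once the response arrives.
 */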
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			   be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->midState = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->midState == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_nt_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_nt_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->midState == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}