// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

LIST_HEAD(conn_list);
DECLARE_RWSEM(conn_list_lock);

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn: connection instance to be cleaned up
 *
 * During thread termination, the corresponding conn instance's
 * resources (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	list_del(&conn->conns_list);
	up_write(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	if (atomic_dec_and_test(&conn->refcnt)) {
		conn->transport->ops->free_transport(conn->transport);
		kfree(conn);
	}
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), KSMBD_DEFAULT_GFP);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	if (IS_ERR(conn->um))
		conn->um = NULL;
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	atomic_set(&conn->refcnt, 1);
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	init_rwsem(&conn->session_lock);

	down_write(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	up_write(&conn_list_lock);
	return conn;
}

bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	bool ret = false;

	down_read(&conn_list_lock);
	list_for_each_entry(t, &conn_list, conns_list) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	atomic_inc(&conn->req_running);
	if (requests_queue) {
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

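/**
 * ksmbd_conn_try_dequeue_request() - drop a request from the pending list
 *
 * @work: smb work containing the request to be dequeued
 *
 * Decrements the running-request count and wakes up any waiters, then,
 * if the work was queued, unlinks it from the request list and releases
 * the async id of asynchronous requests.
 */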
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;

	atomic_dec(&conn->req_running);
	if (waitqueue_active(&conn->req_running_q))
		wake_up(&conn->req_running_q);

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return;

	spin_lock(&conn->request_lock);
	list_del_init(&work->request_entry);
	spin_unlock(&conn->request_lock);
	if (work->asynchronous)
		release_async_work(work);

	wake_up_all(&conn->req_running_q);
}

void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;

	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
{
	struct ksmbd_conn *conn;
	int rc, retry_count = 0, max_timeout = 120;
	int rcount = 1;

retry_idle:
	if (retry_count >= max_timeout)
		return -EIO;

	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
			if (conn == curr_conn)
				rcount = 2;
			if (atomic_read(&conn->req_running) >= rcount) {
				rc = wait_event_timeout(conn->req_running_q,
					atomic_read(&conn->req_running) < rcount,
					HZ);
				if (!rc) {
					up_read(&conn_list_lock);
					retry_count++;
					goto retry_idle;
				}
			}
		}
	}
	up_read(&conn_list_lock);

	return 0;
}

int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int sent;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->send_no_response)
		return 0;

	if (!work->iov_idx)
		return -EINVAL;

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, work->iov,
					    work->iov_cnt,
					    get_rfc1002_len(work->iov[0].iov_base) + 4,
					    work->need_invalidate_rkey,
					    work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smb2_buffer_desc_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smb2_buffer_desc_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       desc, desc_len);
	return ret;
}

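/**
 * ksmbd_conn_alive() - check whether the connection should keep running
 *
 * @conn: connection instance
 *
 * Return: false if the server is shutting down, the connection is
 * exiting, or the connection has been idle past the configured deadtime
 * with no open files; otherwise true
 */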
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last client
	 * request exceeds the user-configured deadtime and the open
	 * file count is zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)

/**
 * ksmbd_conn_handler_loop() - session thread to listen for new SMB requests
 * @p: connection instance
 *
 * One thread per connection
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size, max_req;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	__module_get(THIS_MODULE);

	if (t->ops->prepare && t->ops->prepare(t))
		goto out;

	max_req = server_conf.max_inflight_req;
	conn->last_active = jiffies;
	set_freezable();
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		kvfree(conn->request_buf);
		conn->request_buf = NULL;

recheck:
		if (atomic_read(&conn->req_running) + 1 > max_req) {
			wait_event_interruptible(conn->req_running_q,
				atomic_read(&conn->req_running) < max_req);
			goto recheck;
		}

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					   pdu_size, max_allowed_pdu_size,
					   READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check maximum pdu size (0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE)
			break;

		/* 4 for rfc1002 length field */
		/* 1 for implied bcc[0] */
		size = pdu_size + 4 + 1;
		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));

		/*
		 * We already read 4 bytes to find out the PDU size, now
		 * read in the PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %d\n",
			       size, pdu_size);
			continue;
		}

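		/*
		 * Sanity-check the received buffer: it must start with a
		 * valid SMB message, and SMB2 PDUs must be large enough
		 * to hold at least a complete smb2_hdr.
		 */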
		if (!ksmbd_smb_request(conn))
			break;

		if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
		    SMB2_PROTO_NUMBER) {
			if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
				break;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

out:
	ksmbd_conn_set_releasing(conn);
	/* Wait till all references to the server object are dropped */
	ksmbd_debug(CONN, "Wait for all pending requests(%d)\n", atomic_read(&conn->r_count));
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
{
	atomic_inc(&conn->r_count);
}

void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
{
	/*
	 * Check the waitqueue to drop pending requests on disconnect;
	 * waitqueue_active() is safe because the wakeup condition is
	 * updated with atomic operations.
	 */
	atomic_inc(&conn->refcnt);
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);

	if (atomic_dec_and_test(&conn->refcnt))
		kfree(conn);
}

int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	return ret;
}

static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;

again:
	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		t = conn->transport;
		ksmbd_conn_set_exiting(conn);
		if (t->ops->shutdown) {
			up_read(&conn_list_lock);
			t->ops->shutdown(t);
			down_read(&conn_list_lock);
		}
	}
	up_read(&conn_list_lock);

	if (!list_empty(&conn_list)) {
		msleep(100);
		goto again;
	}
}

void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}