Lines Matching full:ctrl

140 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) in nvmet_async_events_failall() argument
144 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
145 while (ctrl->nr_async_event_cmds) { in nvmet_async_events_failall()
146 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_failall()
147 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
149 mutex_lock(&ctrl->lock); in nvmet_async_events_failall()
151 mutex_unlock(&ctrl->lock); in nvmet_async_events_failall()
154 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl) in nvmet_async_events_process() argument
159 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
160 while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) { in nvmet_async_events_process()
161 aen = list_first_entry(&ctrl->async_events, in nvmet_async_events_process()
163 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_process()
169 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
170 trace_nvmet_async_event(ctrl, req->cqe->result.u32); in nvmet_async_events_process()
172 mutex_lock(&ctrl->lock); in nvmet_async_events_process()
174 mutex_unlock(&ctrl->lock); in nvmet_async_events_process()
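
The two loops above (failall and process) share one pattern: take ctrl->lock, pop one outstanding async-event command (and, in the process path, one queued event), then drop the lock around the completion call and re-acquire it before the next iteration. Below is a minimal user-space model of that "unlock around the callback" pattern, using a pthread mutex in place of the kernel mutex; the names pending_head, cmd_slots and complete_cmd are illustrative only, not the kernel's.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct aen { struct aen *next; unsigned int result; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct aen *pending_head;     /* queued events, oldest first   */
    static void *cmd_slots[4];           /* outstanding AER command slots */
    static int nr_cmds;

    static void complete_cmd(void *cmd, unsigned int result)
    {
        printf("complete %p, result %#x\n", cmd, result);
    }

    /* Pair one queued event with one outstanding command per iteration,
     * dropping the lock while the completion callback runs. */
    static void process_events(void)
    {
        pthread_mutex_lock(&lock);
        while (nr_cmds && pending_head) {
            struct aen *aen = pending_head;
            void *cmd = cmd_slots[--nr_cmds];

            pending_head = aen->next;
            pthread_mutex_unlock(&lock);

            complete_cmd(cmd, aen->result);
            free(aen);

            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        static int dummy_cmd;
        struct aen *a = calloc(1, sizeof(*a));

        a->result = 0x70002;             /* arbitrary demo value */
        pending_head = a;
        cmd_slots[nr_cmds++] = &dummy_cmd;
        process_events();
        return 0;
    }
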
177 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) in nvmet_async_events_free() argument
181 mutex_lock(&ctrl->lock); in nvmet_async_events_free()
182 list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) { in nvmet_async_events_free()
186 mutex_unlock(&ctrl->lock); in nvmet_async_events_free()
191 struct nvmet_ctrl *ctrl = in nvmet_async_event_work() local
194 nvmet_async_events_process(ctrl); in nvmet_async_event_work()
197 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, in nvmet_add_async_event() argument
210 mutex_lock(&ctrl->lock); in nvmet_add_async_event()
211 list_add_tail(&aen->entry, &ctrl->async_events); in nvmet_add_async_event()
212 mutex_unlock(&ctrl->lock); in nvmet_add_async_event()
214 queue_work(nvmet_wq, &ctrl->async_event_work); in nvmet_add_async_event()
217 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) in nvmet_add_to_changed_ns_log() argument
221 mutex_lock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
222 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES) in nvmet_add_to_changed_ns_log()
225 for (i = 0; i < ctrl->nr_changed_ns; i++) { in nvmet_add_to_changed_ns_log()
226 if (ctrl->changed_ns_list[i] == nsid) in nvmet_add_to_changed_ns_log()
230 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) { in nvmet_add_to_changed_ns_log()
231 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff); in nvmet_add_to_changed_ns_log()
232 ctrl->nr_changed_ns = U32_MAX; in nvmet_add_to_changed_ns_log()
236 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid; in nvmet_add_to_changed_ns_log()
238 mutex_unlock(&ctrl->lock); in nvmet_add_to_changed_ns_log()
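
The changed-namespace log above caps out at NVME_MAX_CHANGED_NAMESPACES entries: once the list would overflow, entry 0 is replaced with the 0xffffffff "all namespaces changed" sentinel and the counter is parked at U32_MAX so later calls return early. A small self-contained model of that bookkeeping follows, in host endianness and with a made-up limit of 4 rather than the real constant.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CHANGED_NS 4    /* demo stand-in for NVME_MAX_CHANGED_NAMESPACES */

    static uint32_t changed_ns_list[MAX_CHANGED_NS];
    static uint32_t nr_changed_ns;

    static void log_changed_ns(uint32_t nsid)
    {
        uint32_t i;

        if (nr_changed_ns > MAX_CHANGED_NS)     /* already overflowed */
            return;

        for (i = 0; i < nr_changed_ns; i++)     /* de-duplicate */
            if (changed_ns_list[i] == nsid)
                return;

        if (nr_changed_ns == MAX_CHANGED_NS) {
            /* Too many distinct namespaces: report "all changed". */
            changed_ns_list[0] = 0xffffffff;
            nr_changed_ns = UINT32_MAX;
            return;
        }

        changed_ns_list[nr_changed_ns++] = nsid;
    }

    int main(void)
    {
        for (uint32_t nsid = 1; nsid <= 6; nsid++)
            log_changed_ns(nsid);
        printf("entry0=%#x nr=%#x\n", changed_ns_list[0], nr_changed_ns);
        return 0;
    }
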
243 struct nvmet_ctrl *ctrl; in nvmet_ns_changed() local
247 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ns_changed()
248 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid)); in nvmet_ns_changed()
249 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR)) in nvmet_ns_changed()
251 nvmet_add_async_event(ctrl, NVME_AER_NOTICE, in nvmet_ns_changed()
260 struct nvmet_ctrl *ctrl; in nvmet_send_ana_event() local
263 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_send_ana_event()
264 if (port && ctrl->port != port) in nvmet_send_ana_event()
266 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE)) in nvmet_send_ana_event()
268 nvmet_add_async_event(ctrl, NVME_AER_NOTICE, in nvmet_send_ana_event()
309 struct nvmet_ctrl *ctrl; in nvmet_port_del_ctrls() local
312 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_port_del_ctrls()
313 if (ctrl->port == port) in nvmet_port_del_ctrls()
314 ctrl->ops->delete_ctrl(ctrl); in nvmet_port_del_ctrls()
386 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), in nvmet_keep_alive_timer() local
388 bool reset_tbkas = ctrl->reset_tbkas; in nvmet_keep_alive_timer()
390 ctrl->reset_tbkas = false; in nvmet_keep_alive_timer()
392 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", in nvmet_keep_alive_timer()
393 ctrl->cntlid); in nvmet_keep_alive_timer()
394 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_keep_alive_timer()
398 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", in nvmet_keep_alive_timer()
399 ctrl->cntlid, ctrl->kato); in nvmet_keep_alive_timer()
401 nvmet_ctrl_fatal_error(ctrl); in nvmet_keep_alive_timer()
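
The keep-alive handler above implements traffic-based keep-alive: if any command completed since the last expiry (reset_tbkas was set), the timer is simply re-armed for another KATO period instead of the controller being declared dead. A sketch of just that decision follows; rearm_timer() and fatal_error() are hypothetical stand-ins for queue_delayed_work() and nvmet_ctrl_fatal_error().

    #include <stdbool.h>
    #include <stdio.h>

    struct ctrl {
        int cntlid;
        unsigned int kato;      /* keep-alive timeout, seconds */
        bool reset_tbkas;       /* set whenever a command completes */
    };

    static void rearm_timer(struct ctrl *c)
    {
        printf("ctrl %d: rearm keep-alive for %u s\n", c->cntlid, c->kato);
    }

    static void fatal_error(struct ctrl *c)
    {
        printf("ctrl %d: keep-alive (%u s) expired, fatal error\n",
               c->cntlid, c->kato);
    }

    static void keep_alive_expired(struct ctrl *c)
    {
        bool had_traffic = c->reset_tbkas;

        c->reset_tbkas = false;
        if (had_traffic)        /* traffic counts as a keep-alive */
            rearm_timer(c);
        else
            fatal_error(c);
    }

    int main(void)
    {
        struct ctrl c = { .cntlid = 1, .kato = 15, .reset_tbkas = true };

        keep_alive_expired(&c);     /* traffic seen: rearmed   */
        keep_alive_expired(&c);     /* no traffic: fatal error */
        return 0;
    }
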
404 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_start_keep_alive_timer() argument
406 if (unlikely(ctrl->kato == 0)) in nvmet_start_keep_alive_timer()
409 pr_debug("ctrl %d start keep-alive timer for %d secs\n", in nvmet_start_keep_alive_timer()
410 ctrl->cntlid, ctrl->kato); in nvmet_start_keep_alive_timer()
412 queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_keep_alive_timer()
415 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_stop_keep_alive_timer() argument
417 if (unlikely(ctrl->kato == 0)) in nvmet_stop_keep_alive_timer()
420 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); in nvmet_stop_keep_alive_timer()
422 cancel_delayed_work_sync(&ctrl->ka_work); in nvmet_stop_keep_alive_timer()
502 * Note: ctrl->subsys->lock should be held when calling this function
504 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, in nvmet_p2pmem_ns_add_p2p() argument
511 if (!ctrl->p2p_client || !ns->use_p2pmem) in nvmet_p2pmem_ns_add_p2p()
515 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true); in nvmet_p2pmem_ns_add_p2p()
521 clients[0] = ctrl->p2p_client; in nvmet_p2pmem_ns_add_p2p()
527 dev_name(ctrl->p2p_client), ns->device_path); in nvmet_p2pmem_ns_add_p2p()
532 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev); in nvmet_p2pmem_ns_add_p2p()
555 struct nvmet_ctrl *ctrl; in nvmet_ns_enable() local
583 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
584 nvmet_p2pmem_ns_add_p2p(ctrl, ns); in nvmet_ns_enable()
611 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
612 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_enable()
621 struct nvmet_ctrl *ctrl; in nvmet_ns_disable() local
632 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_disable()
633 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); in nvmet_ns_disable()
711 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_set_error() local
717 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC) in nvmet_set_error()
720 spin_lock_irqsave(&ctrl->error_lock, flags); in nvmet_set_error()
721 ctrl->err_counter++; in nvmet_set_error()
723 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS]; in nvmet_set_error()
725 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter); in nvmet_set_error()
732 spin_unlock_irqrestore(&ctrl->error_lock, flags); in nvmet_set_error()
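
nvmet_set_error() above keeps a running err_counter under error_lock and writes each new entry into ctrl->slots[err_counter % NVMET_ERROR_LOG_SLOTS], i.e. a fixed-size ring that overwrites the oldest entry while the counter itself never wraps back. A compact model of that indexing, with a small slot count chosen purely for the demo:

    #include <stdint.h>
    #include <stdio.h>

    #define ERROR_LOG_SLOTS 8    /* demo value, not the kernel constant */

    struct error_slot {
        uint64_t error_count;
        uint16_t status;
    };

    static struct error_slot slots[ERROR_LOG_SLOTS];
    static uint64_t err_counter;

    /* Record an error: the counter only ever grows, the ring index wraps. */
    static void record_error(uint16_t status)
    {
        struct error_slot *s;

        err_counter++;
        s = &slots[err_counter % ERROR_LOG_SLOTS];
        s->error_count = err_counter;   /* lets the host spot overwrites */
        s->status = status;
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++)
            record_error(0x4004);
        printf("error %llu landed in slot %llu\n",
               (unsigned long long)err_counter,
               (unsigned long long)(err_counter % ERROR_LOG_SLOTS));
        return 0;
    }
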
766 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, in nvmet_cq_setup() argument
773 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_setup() argument
780 ctrl->sqs[qid] = sq; in nvmet_sq_setup()
792 struct nvmet_ctrl *ctrl = sq->ctrl; in nvmet_sq_destroy() local
798 if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) in nvmet_sq_destroy()
799 nvmet_async_events_failall(ctrl); in nvmet_sq_destroy()
806 if (ctrl) { in nvmet_sq_destroy()
813 ctrl->reset_tbkas = true; in nvmet_sq_destroy()
814 sq->ctrl->sqs[sq->qid] = NULL; in nvmet_sq_destroy()
815 nvmet_ctrl_put(ctrl); in nvmet_sq_destroy()
816 sq->ctrl = NULL; /* allows reusing the queue later */ in nvmet_sq_destroy()
960 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
978 if (sq->ctrl) in nvmet_req_init()
979 sq->ctrl->reset_tbkas = true; in nvmet_req_init()
1052 !req->sq->ctrl || !req->sq->qid || !req->ns) in nvmet_req_find_p2p_dev()
1054 return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); in nvmet_req_find_p2p_dev()
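
nvmet_req_find_p2p_dev() above consults a per-controller map keyed by namespace ID (ctrl->p2p_ns_map, the radix tree populated as namespaces are enabled), and only for I/O-queue requests with a live controller and namespace. A minimal model of that keyed lookup follows, using a flat array instead of a radix tree; all struct and field names here are illustrative.

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_NSID 16                 /* demo bound; the real map is sparse */

    struct p2p_dev { const char *name; };

    struct ctrl {
        struct p2p_dev *p2p_ns_map[MAX_NSID + 1];   /* indexed by nsid */
    };

    struct req {
        struct ctrl *ctrl;
        unsigned int qid;               /* 0 = admin queue  */
        unsigned int nsid;              /* 0 = no namespace */
    };

    /* Only I/O requests with a bound controller and namespace may use P2P memory. */
    static struct p2p_dev *find_p2p_dev(struct req *req)
    {
        if (!req->ctrl || !req->qid || !req->nsid || req->nsid > MAX_NSID)
            return NULL;
        return req->ctrl->p2p_ns_map[req->nsid];
    }

    int main(void)
    {
        struct p2p_dev cmb = { .name = "0000:01:00.0" };
        struct ctrl c = { 0 };
        struct req r = { .ctrl = &c, .qid = 1, .nsid = 3 };
        struct p2p_dev *dev;

        c.p2p_ns_map[3] = &cmb;
        dev = find_p2p_dev(&r);
        printf("p2p dev: %s\n", dev ? dev->name : "none");
        return 0;
    }
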
1150 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) in nvmet_start_ctrl() argument
1152 lockdep_assert_held(&ctrl->lock); in nvmet_start_ctrl()
1160 if (!nvmet_is_disc_subsys(ctrl->subsys) && in nvmet_start_ctrl()
1161 (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || in nvmet_start_ctrl()
1162 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { in nvmet_start_ctrl()
1163 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1167 if (nvmet_cc_mps(ctrl->cc) != 0 || in nvmet_start_ctrl()
1168 nvmet_cc_ams(ctrl->cc) != 0 || in nvmet_start_ctrl()
1169 !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) { in nvmet_start_ctrl()
1170 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
1174 ctrl->csts = NVME_CSTS_RDY; in nvmet_start_ctrl()
1182 if (ctrl->kato) in nvmet_start_ctrl()
1183 mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_ctrl()
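
nvmet_start_ctrl() above gates CSTS.RDY on the CC fields the target actually supports: for I/O (non-discovery) subsystems the submission/completion entry sizes must be the standard NVM values, and MPS, AMS and CSS must all be acceptable; anything else latches CSTS.CFS instead. Below is a standalone sketch of that validation order, with the CC field extractors written out per the register layout rather than via the kernel helpers.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* CC field extractors per the NVMe register layout. */
    #define CC_CSS(cc)     (((cc) >> 4)  & 0x7)
    #define CC_MPS(cc)     (((cc) >> 7)  & 0xf)
    #define CC_AMS(cc)     (((cc) >> 11) & 0x7)
    #define CC_IOSQES(cc)  (((cc) >> 16) & 0xf)
    #define CC_IOCQES(cc)  (((cc) >> 20) & 0xf)

    #define NVM_IOSQES 6    /* 64-byte SQ entries */
    #define NVM_IOCQES 4    /* 16-byte CQ entries */

    static bool css_supported(uint32_t css)
    {
        return css == 0;    /* demo: NVM command set only */
    }

    /* Returns true if the controller may report CSTS.RDY, false for CSTS.CFS. */
    static bool cc_acceptable(uint32_t cc, bool discovery)
    {
        if (!discovery &&
            (CC_IOSQES(cc) != NVM_IOSQES || CC_IOCQES(cc) != NVM_IOCQES))
            return false;
        if (CC_MPS(cc) != 0 || CC_AMS(cc) != 0 || !css_supported(CC_CSS(cc)))
            return false;
        return true;
    }

    int main(void)
    {
        uint32_t cc = (NVM_IOCQES << 20) | (NVM_IOSQES << 16) | 1 /* EN */;

        printf("cc=%#x acceptable=%d\n", cc, cc_acceptable(cc, false));
        return 0;
    }
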
1186 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) in nvmet_clear_ctrl() argument
1188 lockdep_assert_held(&ctrl->lock); in nvmet_clear_ctrl()
1191 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_clear_ctrl()
1192 ctrl->cc = 0; in nvmet_clear_ctrl()
1195 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) in nvmet_update_cc() argument
1199 mutex_lock(&ctrl->lock); in nvmet_update_cc()
1200 old = ctrl->cc; in nvmet_update_cc()
1201 ctrl->cc = new; in nvmet_update_cc()
1204 nvmet_start_ctrl(ctrl); in nvmet_update_cc()
1206 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
1208 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
1209 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1212 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
1213 mutex_unlock(&ctrl->lock); in nvmet_update_cc()
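
nvmet_update_cc() above reacts to edges rather than absolute values: EN going 0->1 starts the controller, EN going 1->0 clears it, a shutdown request (CC.SHN becoming non-zero) clears the controller and latches CSTS.SHST_CMPLT, and writing SHN back to zero drops that status bit. A small decision-table sketch of those transitions follows; start_ctrl()/clear_ctrl() are placeholders for the functions in the listing.

    #include <stdint.h>
    #include <stdio.h>

    #define CC_EN(cc)   (((cc) >> 0)  & 0x1)
    #define CC_SHN(cc)  (((cc) >> 14) & 0x3)

    static void start_ctrl(void)  { puts("start controller"); }
    static void clear_ctrl(void)  { puts("clear controller"); }

    /* Apply a CC write, reacting only to the bits that changed. */
    static uint32_t update_cc(uint32_t old_cc, uint32_t new_cc, uint32_t csts)
    {
        if (CC_EN(new_cc) && !CC_EN(old_cc))
            start_ctrl();
        if (!CC_EN(new_cc) && CC_EN(old_cc))
            clear_ctrl();
        if (CC_SHN(new_cc) && !CC_SHN(old_cc)) {
            clear_ctrl();
            csts |= (2u << 2);      /* CSTS.SHST = shutdown complete */
        }
        if (!CC_SHN(new_cc) && CC_SHN(old_cc))
            csts &= ~(2u << 2);     /* clear shutdown-complete       */
        return csts;
    }

    int main(void)
    {
        uint32_t csts = 0;

        csts = update_cc(0, 1, csts);               /* enable           */
        csts = update_cc(1, 1 | (1u << 14), csts);  /* request shutdown */
        printf("csts=%#x\n", csts);
        return 0;
    }
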
1216 static void nvmet_init_cap(struct nvmet_ctrl *ctrl) in nvmet_init_cap() argument
1219 ctrl->cap = (1ULL << 37); in nvmet_init_cap()
1221 ctrl->cap |= (1ULL << 43); in nvmet_init_cap()
1223 ctrl->cap |= (15ULL << 24); in nvmet_init_cap()
1225 if (ctrl->ops->get_max_queue_size) in nvmet_init_cap()
1226 ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1; in nvmet_init_cap()
1228 ctrl->cap |= NVMET_QUEUE_SIZE - 1; in nvmet_init_cap()
1230 if (nvmet_is_passthru_subsys(ctrl->subsys)) in nvmet_init_cap()
1231 nvmet_passthrough_override_cap(ctrl); in nvmet_init_cap()
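
nvmet_init_cap() above assembles the 64-bit CAP value field by field: bit 37 advertises the NVM command set, bit 43 that one or more I/O command sets are selectable, bits 31:24 carry the CC.EN timeout (15 here, in 500 ms units), and the low bits hold MQES as "maximum queue entries minus one", taken from the transport when it reports a limit and from NVMET_QUEUE_SIZE otherwise. A standalone sketch of the same bit assembly, with a queue size of 1024 as a placeholder:

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble an NVMe CAP register value field by field. */
    static uint64_t build_cap(uint32_t max_queue_entries)
    {
        uint64_t cap = 0;

        cap |= 1ULL << 37;              /* CSS: NVM command set supported   */
        cap |= 1ULL << 43;              /* CSS: I/O command sets selectable */
        cap |= 15ULL << 24;             /* TO: CC.EN timeout, 500 ms units  */
        cap |= max_queue_entries - 1;   /* MQES is zero-based               */
        return cap;
    }

    int main(void)
    {
        printf("CAP = %#llx\n", (unsigned long long)build_cap(1024));
        return 0;
    }
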
1238 struct nvmet_ctrl *ctrl = NULL; in nvmet_ctrl_find_get() local
1250 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ctrl_find_get()
1251 if (ctrl->cntlid == cntlid) { in nvmet_ctrl_find_get()
1252 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { in nvmet_ctrl_find_get()
1256 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_ctrl_find_get()
1259 /* ctrl found */ in nvmet_ctrl_find_get()
1264 ctrl = NULL; /* ctrl not found */ in nvmet_ctrl_find_get()
1273 return ctrl; in nvmet_ctrl_find_get()
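
nvmet_ctrl_find_get() above walks the subsystem's controller list for a matching cntlid, rejects the match if the hostnqn differs, and only returns the controller after kref_get_unless_zero() succeeds, so a controller already on its way to being freed is never handed out. A user-space model of that lookup follows, with an atomic refcount standing in for the kref; all type and field names here are illustrative.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct ctrl {
        struct ctrl *next;
        unsigned int cntlid;
        char hostnqn[64];
        atomic_int ref;
    };

    /* Take a reference only if the object is still live (ref > 0). */
    static int get_unless_zero(atomic_int *ref)
    {
        int v = atomic_load(ref);

        while (v > 0)
            if (atomic_compare_exchange_weak(ref, &v, v + 1))
                return 1;
        return 0;
    }

    static struct ctrl *ctrl_find_get(struct ctrl *head, unsigned int cntlid,
                                      const char *hostnqn)
    {
        for (struct ctrl *c = head; c; c = c->next) {
            if (c->cntlid != cntlid)
                continue;
            if (strcmp(hostnqn, c->hostnqn))    /* wrong host: reject */
                return NULL;
            if (!get_unless_zero(&c->ref))      /* already dying      */
                return NULL;
            return c;
        }
        return NULL;
    }

    int main(void)
    {
        struct ctrl c = { .cntlid = 7,
                          .hostnqn = "nqn.2014-08.org.example:host1",
                          .ref = 1 };

        printf("found: %p\n", (void *)ctrl_find_get(&c, 7, c.hostnqn));
        return 0;
    }
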
1278 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { in nvmet_check_ctrl_status()
1284 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { in nvmet_check_ctrl_status()
1318 * Note: ctrl->subsys->lock should be held when calling this function
1320 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, in nvmet_setup_p2p_ns_map() argument
1329 ctrl->p2p_client = get_device(req->p2p_client); in nvmet_setup_p2p_ns_map()
1331 xa_for_each(&ctrl->subsys->namespaces, idx, ns) in nvmet_setup_p2p_ns_map()
1332 nvmet_p2pmem_ns_add_p2p(ctrl, ns); in nvmet_setup_p2p_ns_map()
1336 * Note: ctrl->subsys->lock should be held when calling this function
1338 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl) in nvmet_release_p2p_ns_map() argument
1343 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0) in nvmet_release_p2p_ns_map()
1346 put_device(ctrl->p2p_client); in nvmet_release_p2p_ns_map()
1351 struct nvmet_ctrl *ctrl = in nvmet_fatal_error_handler() local
1354 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); in nvmet_fatal_error_handler()
1355 ctrl->ops->delete_ctrl(ctrl); in nvmet_fatal_error_handler()
1362 struct nvmet_ctrl *ctrl; in nvmet_alloc_ctrl() local
1389 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvmet_alloc_ctrl()
1390 if (!ctrl) in nvmet_alloc_ctrl()
1392 mutex_init(&ctrl->lock); in nvmet_alloc_ctrl()
1394 ctrl->port = req->port; in nvmet_alloc_ctrl()
1395 ctrl->ops = req->ops; in nvmet_alloc_ctrl()
1399 if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP) in nvmet_alloc_ctrl()
1403 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); in nvmet_alloc_ctrl()
1404 INIT_LIST_HEAD(&ctrl->async_events); in nvmet_alloc_ctrl()
1405 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); in nvmet_alloc_ctrl()
1406 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); in nvmet_alloc_ctrl()
1407 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); in nvmet_alloc_ctrl()
1409 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1410 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
1412 kref_init(&ctrl->ref); in nvmet_alloc_ctrl()
1413 ctrl->subsys = subsys; in nvmet_alloc_ctrl()
1414 nvmet_init_cap(ctrl); in nvmet_alloc_ctrl()
1415 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL); in nvmet_alloc_ctrl()
1417 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES, in nvmet_alloc_ctrl()
1419 if (!ctrl->changed_ns_list) in nvmet_alloc_ctrl()
1422 ctrl->sqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
1425 if (!ctrl->sqs) in nvmet_alloc_ctrl()
1435 ctrl->cntlid = ret; in nvmet_alloc_ctrl()
1441 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato) in nvmet_alloc_ctrl()
1445 ctrl->kato = DIV_ROUND_UP(kato, 1000); in nvmet_alloc_ctrl()
1447 ctrl->err_counter = 0; in nvmet_alloc_ctrl()
1448 spin_lock_init(&ctrl->error_lock); in nvmet_alloc_ctrl()
1450 nvmet_start_keep_alive_timer(ctrl); in nvmet_alloc_ctrl()
1453 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvmet_alloc_ctrl()
1454 nvmet_setup_p2p_ns_map(ctrl, req); in nvmet_alloc_ctrl()
1457 *ctrlp = ctrl; in nvmet_alloc_ctrl()
1461 kfree(ctrl->sqs); in nvmet_alloc_ctrl()
1463 kfree(ctrl->changed_ns_list); in nvmet_alloc_ctrl()
1465 kfree(ctrl); in nvmet_alloc_ctrl()
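
The tail of nvmet_alloc_ctrl() above shows the usual kernel-style unwind: each acquired resource (ctrl, changed_ns_list, sqs, the cntlid) has a matching cleanup label, and a failure jumps to the label that frees only what was already set up, in reverse order. A condensed illustration of that goto-based cleanup shape follows; the resource names are generic, not the nvmet ones.

    #include <stdlib.h>

    struct widget {
        int *a;
        int *b;
        int *c;
    };

    /* Allocate three resources; on failure free only what already succeeded,
     * in reverse order of acquisition. */
    static struct widget *widget_alloc(void)
    {
        struct widget *w = calloc(1, sizeof(*w));

        if (!w)
            return NULL;
        w->a = malloc(64);
        if (!w->a)
            goto out_free_widget;
        w->b = malloc(64);
        if (!w->b)
            goto out_free_a;
        w->c = malloc(64);
        if (!w->c)
            goto out_free_b;
        return w;

    out_free_b:
        free(w->b);
    out_free_a:
        free(w->a);
    out_free_widget:
        free(w);
        return NULL;
    }

    int main(void)
    {
        struct widget *w = widget_alloc();

        if (w) {
            free(w->c);
            free(w->b);
            free(w->a);
            free(w);
        }
        return 0;
    }
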
1474 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref); in nvmet_ctrl_free() local
1475 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_ctrl_free()
1478 nvmet_release_p2p_ns_map(ctrl); in nvmet_ctrl_free()
1479 list_del(&ctrl->subsys_entry); in nvmet_ctrl_free()
1482 nvmet_stop_keep_alive_timer(ctrl); in nvmet_ctrl_free()
1484 flush_work(&ctrl->async_event_work); in nvmet_ctrl_free()
1485 cancel_work_sync(&ctrl->fatal_err_work); in nvmet_ctrl_free()
1487 nvmet_destroy_auth(ctrl); in nvmet_ctrl_free()
1489 ida_free(&cntlid_ida, ctrl->cntlid); in nvmet_ctrl_free()
1491 nvmet_async_events_free(ctrl); in nvmet_ctrl_free()
1492 kfree(ctrl->sqs); in nvmet_ctrl_free()
1493 kfree(ctrl->changed_ns_list); in nvmet_ctrl_free()
1494 kfree(ctrl); in nvmet_ctrl_free()
1499 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) in nvmet_ctrl_put() argument
1501 kref_put(&ctrl->ref, nvmet_ctrl_free); in nvmet_ctrl_put()
1504 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) in nvmet_ctrl_fatal_error() argument
1506 mutex_lock(&ctrl->lock); in nvmet_ctrl_fatal_error()
1507 if (!(ctrl->csts & NVME_CSTS_CFS)) { in nvmet_ctrl_fatal_error()
1508 ctrl->csts |= NVME_CSTS_CFS; in nvmet_ctrl_fatal_error()
1509 queue_work(nvmet_wq, &ctrl->fatal_err_work); in nvmet_ctrl_fatal_error()
1511 mutex_unlock(&ctrl->lock); in nvmet_ctrl_fatal_error()
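
nvmet_ctrl_fatal_error() above takes ctrl->lock and, only if CSTS.CFS is not yet set, sets the bit and queues the fatal-error work, so concurrent error paths schedule the controller teardown exactly once. A tiny model of that set-once gate using a pthread mutex; schedule_teardown() is a stand-in for queueing fatal_err_work.

    #include <pthread.h>
    #include <stdio.h>

    #define CSTS_CFS 0x2    /* controller fatal status bit */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int csts;

    static void schedule_teardown(void)
    {
        puts("fatal error work queued");
    }

    /* Latch CFS and schedule teardown at most once, no matter how many
     * error paths report a fatal condition concurrently. */
    static void ctrl_fatal_error(void)
    {
        pthread_mutex_lock(&lock);
        if (!(csts & CSTS_CFS)) {
            csts |= CSTS_CFS;
            schedule_teardown();
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        ctrl_fatal_error();
        ctrl_fatal_error();     /* second call is a no-op */
        return 0;
    }
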
1631 struct nvmet_ctrl *ctrl; in nvmet_subsys_del_ctrls() local
1634 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_subsys_del_ctrls()
1635 ctrl->ops->delete_ctrl(ctrl); in nvmet_subsys_del_ctrls()