/*
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 * ...
 *	- Redistributions of source code must retain the above
 * ...
 *	- Redistributions in binary form must reproduce the above
 * ...
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * ...
 */
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_IN].data_len, Protection size
 *  is stored in task->prot[ISER_DIR_IN].data_len
 */
/* in iser_prepare_read_cmd() */
	struct iscsi_iser_task *iser_task = task->dd_data;
	...
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
	...
	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
		...
	}
	...
	err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);
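
For reference, the iSER header that the two prepare functions fill in is a
28-byte control block carried in front of every PDU. The sketch below mirrors
struct iser_ctrl as defined in the driver's iscsi_iser.h (the field comments
are mine); ISER_RSV/ISER_WSV in flags tell the target that the read/write
STag and VA fields are valid:

struct iser_ctrl {
	u8	flags;		/* ISER_VER, plus ISER_RSV and/or ISER_WSV */
	u8	rsvd[3];
	__be32	write_stag;	/* STag the target RDMA-Reads Data-OUT from */
	__be64	write_va;	/* advertised at buffer base + unsol_sz */
	__be32	read_stag;	/* STag the target RDMA-Writes Data-IN into */
	__be64	read_va;
} __packed;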
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_OUT].data_len, Protection size
 *  is stored at task->prot[ISER_DIR_OUT].data_len
 */
/* in iser_prepare_write_cmd() */
	struct iscsi_iser_task *iser_task = task->dd_data;
	...
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
	...
	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
		...
	}
	...
	err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
				   buf_out->data_len == imm_sz);
	if (err) {
		iser_err("Failed to set up Data-OUT RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags |= ISER_WSV;
		if (buf_out->data_len > imm_sz) {
			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
		}

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}
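
To make the imm_sz/unsol_sz/edtl bookkeeping above concrete, here is a small
standalone C illustration (not driver code; the helper name and the example
numbers are made up) of how an edtl-byte write splits into immediate data,
unsolicited Data-Out PDUs, and the RDMA-Read region advertised via write_va:

#include <stdio.h>

static void split_write(unsigned long long base, unsigned int imm_sz,
			unsigned int unsol_sz, unsigned int edtl)
{
	if (imm_sz)
		printf("immediate:   [0, %u) sent as tx_sg[1] of the command PDU\n",
		       imm_sz);
	if (unsol_sz > imm_sz)
		printf("unsolicited: [%u, %u) sent as Data-Out PDUs\n",
		       imm_sz, unsol_sz);
	if (edtl > unsol_sz)
		printf("RDMA-Read:   [%u, %u) exposed at write_va 0x%llx\n",
		       unsol_sz, edtl, base + unsol_sz);
}

int main(void)
{
	/* e.g. an 8 KB write with 512 B immediate data and a 4 KB first burst */
	split_write(0x1000, 512, 4096, 8192);
	return 0;
}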
/* in iser_create_send_desc() */
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
				   tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISER_VER;
	tx_desc->num_sge = 1;
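
The sync_single_for_cpu call above hands ownership of the (persistently
mapped) header buffer to the CPU before the headers are rewritten; the reverse
sync must happen before the descriptor is handed back to the HCA. A minimal
sketch of that bracket, assuming a generic mapped buffer (the function name
and parameters are mine):

static void example_fill_headers(struct ib_device *ib_dev, u64 dma_addr,
				 void *hdr_buf, size_t len)
{
	ib_dma_sync_single_for_cpu(ib_dev, dma_addr, len, DMA_TO_DEVICE);
	memset(hdr_buf, 0, len);	/* CPU owns the buffer here */
	ib_dma_sync_single_for_device(ib_dev, dma_addr, len, DMA_TO_DEVICE);
	/* only now may the buffer be posted for send again */
}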
/* in iser_free_login_buf() */
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	/* make sure we never redo any unmapping */
	desc->req = NULL;
	desc->rsp = NULL;
/* in iser_alloc_login_buf() */
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					  ISER_RX_LOGIN_SIZE,
					  DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
/* in iser_alloc_rx_descriptors() */
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	...
	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
				    iser_conn->pages_per_mr))
		goto create_rdma_reg_res_failed;
	...
	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
					    sizeof(struct iser_rx_desc),
					    GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	...
create_rdma_reg_res_failed:
	...
	return -ENOMEM;
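
Because cmds_max is constrained to a power of two, the mask set above lets the
RX descriptor ring be indexed with a cheap AND instead of a modulo. A
standalone illustration (variable names are hypothetical):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int cmds_max = 64;		/* must be 2^N */
	unsigned int mask = cmds_max - 1;	/* qp_max_recv_dtos_mask */

	for (unsigned int head = 0; head < 200; head++)
		assert((head & mask) == head % cmds_max);

	printf("min_posted_rx = %u\n", cmds_max >> 2);	/* 16 */
	return 0;
}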
/* in iser_free_rx_descriptors() */
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	...
	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;
/* in iser_post_rx_bufs() */
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;
	...
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	}

	iser_dbg("Initially post: %d\n",
		 iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;
/**
 * iser_send_command - send command PDU
 * ...
 */
/* in iser_send_command() */
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	...
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	tx_desc->cqe.done = iser_cmd_comp;
	...
	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
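
The protection-buffer sizing in the prot branch above follows T10-DIF: 8 bytes
of protection information per logical block. A standalone check of the
arithmetic (the numbers are an example; __builtin_ctz stands in for the
kernel's ilog2 on a power-of-two sector size):

#include <stdio.h>

int main(void)
{
	unsigned int data_len = 4096, sector_size = 512;
	unsigned int shift = __builtin_ctz(sector_size);	/* ilog2(512) = 9 */

	/* 8 sectors * 8 bytes of PI = 64 bytes */
	printf("prot data_len = %u\n", (data_len >> shift) * 8);
	return 0;
}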
/**
 * iser_send_data_out - send data out PDU
 * ...
 */
/* in iser_send_data_out() */
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	...
	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);
	...
	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (!tx_desc)
		return -ENOMEM;

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->cqe.done = iser_dataout_comp;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
	...
	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}

	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
/* in iser_send_control() */
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	...
	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	mdesc->cqe.done = iser_ctrl_comp;
	...
	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct iser_login_desc *desc = &iser_conn->login_desc;
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			...
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
					   task->data_count, DMA_TO_DEVICE);

		memcpy(desc->req, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
					      task->data_count, DMA_TO_DEVICE);

		tx_dsg->addr = desc->req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
}
/* in iser_inv_desc() */
	if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
		     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
		...
		return -EINVAL;
	}

	desc->rsc.mr_valid = 0;
static int iser_check_remote_inv(struct iser_conn *iser_conn,
				 struct ib_wc *wc,
				 struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;
		...
		if (unlikely(!iser_conn->snd_w_inv)) {
			...
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (task) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}
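
For context: when the target uses Send with Invalidate, the HCA reports the
invalidated rkey in the receive completion, and the initiator must verify that
it matches one of its own registrations before trusting it. A condensed sketch
of that check, assuming a completion taken from this connection's QP (the
helper name is mine):

static bool wc_invalidated_rkey(const struct ib_wc *wc, u32 expected_rkey)
{
	return (wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
	       wc->ex.invalidate_rkey == expected_rkey;
}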
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length, outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task ...							      */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
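
The replenish heuristic above tops the receive queue back up in batches of
min_posted_rx once enough completions have drained. Worked numbers (a
standalone illustration; the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int qp_max_recv_dtos = 64;
	unsigned int min_posted_rx = qp_max_recv_dtos >> 2;	/* 16 */
	unsigned int outstanding = 40;	/* posted, not yet completed */

	if (outstanding + min_posted_rx <= qp_max_recv_dtos) {
		unsigned int count = qp_max_recv_dtos - outstanding;

		if (count > min_posted_rx)
			count = min_posted_rx;
		printf("repost %u rx buffers\n", count);	/* 16 */
	}
	return 0;
}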
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "command");
}
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/* this arithmetic is legal by libiscsi dd_data allocation */
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}
/* in iser_task_rdma_init() */
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].dma_nents = 0;
	iser_task->prot[ISER_DIR_OUT].dma_nents = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
767 int prot_count = scsi_prot_sg_count(iser_task->sc); in iser_task_rdma_finalize()
769 if (iser_task->dir[ISER_DIR_IN]) { in iser_task_rdma_finalize()
772 &iser_task->data[ISER_DIR_IN], in iser_task_rdma_finalize()
776 &iser_task->prot[ISER_DIR_IN], in iser_task_rdma_finalize()
780 if (iser_task->dir[ISER_DIR_OUT]) { in iser_task_rdma_finalize()
783 &iser_task->data[ISER_DIR_OUT], in iser_task_rdma_finalize()
787 &iser_task->prot[ISER_DIR_OUT], in iser_task_rdma_finalize()