Lines matching refs:mtt

25 	struct erdma_mtt *mtt = mem->mtt;
28 *addr0 = mtt->buf_dma;
32 *addr0 = mtt->buf[0];
33 memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
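
Lines 25–33 show the two ways a queue buffer's page table reaches the device: when the entries fit inline in the command, buf[0] is passed directly and the remaining entries are memcpy'd after it (MTT_SIZE() converts an entry count to bytes; the entries are u64 DMA addresses), otherwise only buf_dma, the DMA address of the table itself, is passed and the hardware walks one level of indirection. A minimal sketch of that dispatch, with the function name and the inline-entry limit assumed for illustration:

static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u64 *addr0,
                                      u64 *addr1)
{
        struct erdma_mtt *mtt = mem->mtt;

        /* ERDMA_MAX_INLINE_MTT_ENTRIES is an assumed limit name. */
        if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
                /* Too many entries to inline: pass the table's own DMA
                 * address and let the device dereference it.
                 */
                *addr0 = mtt->buf_dma;
        } else {
                /* Inline: first entry in addr0, the rest copied after addr1. */
                *addr0 = mtt->buf[0];
                memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
        }
}
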
148 if (mr->mem.mtt->continuous) {
149 req.phy_addr[0] = mr->mem.mtt->buf_dma;
152 req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
153 mtt_level = mr->mem.mtt->level;
156 memcpy(req.phy_addr, mr->mem.mtt->buf,
179 if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
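
The 148–179 matches are from the register-MR command: a continuous MTT supplies buf_dma with a fixed one-level setting, a scattered MTT supplies the DMA address of its top-level scatterlist plus the level recorded at build time, and small tables (line 156) are copied wholesale into the request's phy_addr array; line 179 guards extra command setup needed only for multi-level tables. A hedged fragment, with the level constant and request layout assumed:

        if (mr->mem.mtt->continuous) {
                req.phy_addr[0] = mr->mem.mtt->buf_dma;
                mtt_level = ERDMA_MR_MTT_1LEVEL;        /* assumed constant */
        } else {
                /* Scattered top level: its first segment's DMA address
                 * is the device's entry point into the table chain.
                 */
                req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
                mtt_level = mr->mem.mtt->level;
        }
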
225 req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
226 req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
231 req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
232 req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
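
Lines 225–232 apply the same inline-vs-indirect split when a queue buffer address is written into a create command as two 32-bit halves. Roughly, with the guard condition assumed:

        if (mem->mtt_nents == 1) {      /* assumed: single page, no indirection */
                req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
                req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
        } else {
                req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
                req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
        }
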
587 struct erdma_mtt *mtt = mem->mtt;
591 while (mtt->low_level)
592 mtt = mtt->low_level;
595 mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
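
Lines 587–595 populate the lowest level of a multi-level MTT: descend the low_level chain to the leaf table, then record one DMA address per device-sized block of the umem. A sketch assuming the enclosing function is the bottom-fill helper and that erdma_mem carries umem and page_size fields:

static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
{
        struct erdma_mtt *mtt = mem->mtt;
        struct ib_block_iter biter;
        u32 idx = 0;

        /* Middle levels only hold addresses of the level below them;
         * the umem's block addresses always land in the leaf table.
         */
        while (mtt->low_level)
                mtt = mtt->low_level;

        rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
                mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
}
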
601 struct erdma_mtt *mtt;
603 mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
604 if (!mtt)
607 mtt->size = size;
608 mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
609 if (!mtt->buf)
612 mtt->continuous = true;
613 mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
615 if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
618 return mtt;
621 kfree(mtt->buf);
624 kfree(mtt);
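
Lines 601–624 are the continuous-MTT constructor: one kzalloc'd buffer holds every entry and is mapped as a single DMA range, so buf_dma alone describes the whole table to the hardware. Reassembled from the fragments, with the error labels and return values assumed:

static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
                                               size_t size)
{
        struct erdma_mtt *mtt;

        mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->size = size;
        mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
        if (!mtt->buf)
                goto err_free_mtt;

        mtt->continuous = true;
        mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
                goto err_free_mtt_buf;

        return mtt;

err_free_mtt_buf:
        kfree(mtt->buf);
err_free_mtt:
        kfree(mtt);

        return ERR_PTR(-ENOMEM);
}
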
630 struct erdma_mtt *mtt)
632 dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
633 DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
634 vfree(mtt->sglist);
638 struct erdma_mtt *mtt)
640 erdma_destroy_mtt_buf_sg(dev, mtt);
641 vfree(mtt->buf);
642 kfree(mtt);
645 static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
652 mtt->buf[idx++] = sg_dma_address(sg);
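
Lines 645–652 wire two levels together: a middle table's entries are simply the DMA addresses of the lower table's scatterlist segments. A sketch of the loop (the second parameter name is assumed):

static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
                                  struct erdma_mtt *low_mtt)
{
        struct scatterlist *sg;
        u32 idx = 0;
        int i;

        for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
                mtt->buf[idx++] = sg_dma_address(sg);
}
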
655 static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
658 void *buf = mtt->buf;
666 npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
684 mtt->sglist = sglist;
685 mtt->nsg = nsg;
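
Lines 655–685 make a vzalloc'd MTT buffer DMA-visible: split it into pages, look each one up with vmalloc_to_page(), and map the resulting scatterlist. The middle of the function is not in the matches, so this reconstruction is an informed guess consistent with the unmap path at lines 632–633:

static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
        struct scatterlist *sglist;
        void *buf = mtt->buf;
        u32 npages, i, nsg;
        struct page *pg;

        npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
        sglist = vzalloc(npages * sizeof(*sglist));
        if (!sglist)
                return -ENOMEM;

        /* One scatterlist entry per page of the vmalloc'd entry buffer. */
        sg_init_table(sglist, npages);
        for (i = 0; i < npages; i++) {
                pg = vmalloc_to_page(buf);
                if (!pg)
                        goto err;
                sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
                buf += PAGE_SIZE;
        }

        nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
        if (!nsg)
                goto err;

        mtt->sglist = sglist;
        mtt->nsg = nsg;

        return 0;

err:
        vfree(sglist);
        return -ENOMEM;
}
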
697 struct erdma_mtt *mtt;
700 mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
701 if (!mtt)
704 mtt->size = ALIGN(size, PAGE_SIZE);
705 mtt->buf = vzalloc(mtt->size);
706 mtt->continuous = false;
707 if (!mtt->buf)
710 ret = erdma_create_mtt_buf_sg(dev, mtt);
714 ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
715 mtt->size, mtt->nsg);
717 return mtt;
720 vfree(mtt->buf);
723 kfree(mtt);
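
Lines 697–723 are the scatter-MTT constructor: the entry buffer comes from vzalloc (page-aligned and only virtually contiguous), and erdma_create_mtt_buf_sg() then maps it for DMA. Reassembled, with the error labels assumed:

static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
                                                  size_t size)
{
        struct erdma_mtt *mtt;
        int ret = -ENOMEM;

        mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->size = ALIGN(size, PAGE_SIZE);
        mtt->buf = vzalloc(mtt->size);
        mtt->continuous = false;
        if (!mtt->buf)
                goto err_free_mtt;

        ret = erdma_create_mtt_buf_sg(dev, mtt);
        if (ret)
                goto err_free_mtt_buf;

        ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
                  mtt->size, mtt->nsg);

        return mtt;

err_free_mtt_buf:
        vfree(mtt->buf);
err_free_mtt:
        kfree(mtt);

        return ERR_PTR(ret);
}
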
731 struct erdma_mtt *mtt, *tmp_mtt;
743 mtt = erdma_create_scatter_mtt(dev, size);
744 if (IS_ERR(mtt))
745 return mtt;
748 /* converge the mtt table. */
749 while (mtt->nsg != 1 && level <= 3) {
750 tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
755 erdma_init_middle_mtt(tmp_mtt, mtt);
756 tmp_mtt->low_level = mtt;
757 mtt = tmp_mtt;
766 mtt->level = level;
767 ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
768 mtt->level, mtt->sglist[0].dma_address);
770 return mtt;
772 while (mtt) {
773 tmp_mtt = mtt->low_level;
774 erdma_destroy_scatter_mtt(dev, mtt);
775 mtt = tmp_mtt;
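
Lines 731–775 stack scatter MTTs until the top level collapses into a single scatterlist segment: each pass builds a new table sized MTT_SIZE(mtt->nsg) over the previous one (at most three levels, per the guard on line 749), and the error path walks the low_level chain tearing everything down. The core of the function, reconstructed with the level bookkeeping assumed:

        mtt = erdma_create_scatter_mtt(dev, size);
        if (IS_ERR(mtt))
                return mtt;
        level = 1;

        /* Converge: add a level until the top table fits one segment. */
        while (mtt->nsg != 1 && level <= 3) {
                tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
                if (IS_ERR(tmp_mtt)) {
                        ret = PTR_ERR(tmp_mtt);
                        goto err_free_mtt;
                }
                erdma_init_middle_mtt(tmp_mtt, mtt);
                tmp_mtt->low_level = mtt;
                mtt = tmp_mtt;
                level++;
        }

        mtt->level = level;
        return mtt;

err_free_mtt:
        while (mtt) {
                tmp_mtt = mtt->low_level;
                erdma_destroy_scatter_mtt(dev, mtt);
                mtt = tmp_mtt;
        }
        return ERR_PTR(ret);
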
781 static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
785 if (mtt->continuous) {
786 dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
788 kfree(mtt->buf);
789 kfree(mtt);
791 while (mtt) {
792 tmp_mtt = mtt->low_level;
793 erdma_destroy_scatter_mtt(dev, mtt);
794 mtt = tmp_mtt;
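
Lines 781–794 mirror the two constructors on teardown: a continuous MTT is unmapped with dma_unmap_single() and kfree'd, while a scattered one is destroyed level by level down the low_level chain. Assembled from the fragments:

static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
        struct erdma_mtt *tmp_mtt;

        if (mtt->continuous) {
                dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
                                 DMA_TO_DEVICE);
                kfree(mtt->buf);
                kfree(mtt);
        } else {
                while (mtt) {
                        tmp_mtt = mtt->low_level;
                        erdma_destroy_scatter_mtt(dev, mtt);
                        mtt = tmp_mtt;
                }
        }
}
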
818 mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
820 if (IS_ERR(mem->mtt)) {
821 ret = PTR_ERR(mem->mtt);
840 if (mem->mtt)
841 erdma_destroy_mtt(dev, mem->mtt);
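
Lines 818–841 are the call sites that own the MTT lifecycle: the pin-and-map path sizes the table as MTT_SIZE(mem->page_cnt) after counting device pages in the umem, and the release path destroys the table only if it was created. A hedged outline; the wrapped third argument at line 818 is not shown in the matches and is assumed to be a force-continuous flag:

        mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
                                    force_continuous /* assumed */);
        if (IS_ERR(mem->mtt)) {
                ret = PTR_ERR(mem->mtt);
                goto error_ret;         /* label assumed */
        }

        /* ... and on release: */
        if (mem->mtt)
                erdma_destroy_mtt(dev, mem->mtt);
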
1150 mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
1151 if (IS_ERR(mr->mem.mtt)) {
1152 ret = PTR_ERR(mr->mem.mtt);
1163 erdma_destroy_mtt(dev, mr->mem.mtt);
1182 mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
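
Lines 1150–1182 are the fast-register (ib_alloc_mr) path: the MTT is forced continuous (third argument true at line 1150) so the map_mr_sg page callback can write DMA addresses straight into buf by index, as line 1182 shows. A sketch of that callback, following the ib_mr set_page convention (function name, helper, and bound assumed):

static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct erdma_mr *mr = to_emr(ibmr);     /* container_of helper, assumed */

        /* Refuse to overrun the table, which was sized for max_num_sg
         * entries at alloc_mr time (bound name assumed).
         */
        if (mr->mem.mtt_nents >= mr->mem.page_cnt)
                return -1;

        mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
        mr->mem.mtt_nents++;

        return 0;
}
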