Lines matching refs:snic (each match is prefixed by its line number in the source file)
15 #include "snic.h"
25 struct snic *snic = svnic_dev_priv(wq->vdev);
30 SNIC_HOST_INFO(snic->shost,
34 SNIC_TRC(snic->shost->host_no, 0, 0,
49 struct snic *snic = svnic_dev_priv(vdev);
54 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
55 svnic_wq_service(&snic->wq[q_num],
60 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
66 snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
71 snic->s_stats.misc.last_ack_time = jiffies;
72 for (i = 0; i < snic->wq_count; i++) {
73 work_done += svnic_cq_service(&snic->cq[i],
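
Taken together, the matches at source lines 25-73 form the send-completion path: snic_wq_cmpl_handler() drains each completion queue with svnic_cq_service(), which calls back per completion to service the work queue under wq_lock. A sketch of how the pieces fit, assuming the callback signatures implied by the svnic_* helpers above; declarations not visible in the matches are reconstructions, not verified source (types come from snic.h, included at source line 15):

        /* Per-descriptor callback: the matches show it only logs the ack. */
        static void
        snic_wq_cmpl_frame_send(struct vnic_wq *wq, struct cq_desc *cq_desc,
                                struct vnic_wq_buf *buf, void *opaque)
        {
                struct snic *snic = svnic_dev_priv(wq->vdev);

                SNIC_HOST_INFO(snic->shost, "Ack received for host req %p.\n",
                               buf->os_buf);
                buf->os_buf = NULL;     /* ownership returns to the driver */
        }

        /* Per-completion callback: service the WQ under its lock. */
        static int
        snic_wq_cmpl_handler_cont(struct vnic_dev *vdev, struct cq_desc *cq_desc,
                                  u8 type, u16 q_num, u16 cmpl_idx, void *opaque)
        {
                struct snic *snic = svnic_dev_priv(vdev);
                unsigned long flags;

                spin_lock_irqsave(&snic->wq_lock[q_num], flags);
                svnic_wq_service(&snic->wq[q_num], cq_desc, cmpl_idx,
                                 snic_wq_cmpl_frame_send, NULL);
                spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

                return 0;
        }

        /* Entry point: drain every CQ within the work_to_do budget. */
        int
        snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
        {
                int work_done = 0, i;

                snic->s_stats.misc.last_ack_time = jiffies;
                for (i = 0; i < snic->wq_count; i++)
                        work_done += svnic_cq_service(&snic->cq[i], work_to_do,
                                                      snic_wq_cmpl_handler_cont,
                                                      NULL);

                return work_done;
        }
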
87 struct snic *snic = svnic_dev_priv(wq->vdev);
91 dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
95 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
97 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
103 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
106 snic_pci_unmap_rsp_buf(snic, rqi);
110 snic_req_free(snic, rqi);
111 SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
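
The snic_free_wq_buf() matches (source lines 87-111) show the teardown order for a completed WQ buffer: unmap the request DMA, unlink its snic_req_info from spl_cmd_list under spl_cmd_lock, unmap any response buffer, then free the request. A sketch under those assumptions; req_to_rqi() and the list handling between the lock/unlock pairs are inferred, not shown in the matches:

        static void
        snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        {
                struct snic *snic = svnic_dev_priv(wq->vdev);
                struct snic_host_req *req = buf->os_buf;
                struct snic_req_info *rqi;
                unsigned long flags;

                /* 1. Release the mapping created when the request was posted. */
                dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
                                 DMA_TO_DEVICE);

                /* 2. Unlink from the untagged-request list. If it is already
                 * gone, another path (e.g. snic_free_all_untagged_reqs) owns
                 * it. req_to_rqi() is an assumed request-to-rqi accessor. */
                rqi = req_to_rqi(req);
                spin_lock_irqsave(&snic->spl_cmd_lock, flags);
                if (list_empty(&rqi->list)) {
                        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
                        return;
                }
                list_del_init(&rqi->list);
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

                /* 3. Unmap the response buffer, then free the request itself. */
                snic_pci_unmap_rsp_buf(snic, rqi);
                snic_req_free(snic, rqi);
                SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
        }
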
119 snic_select_wq(struct snic *snic)
128 snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
130 int nr_wqdesc = snic->config.wq_enet_desc_count;
137 SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
143 nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);
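
snic_wqdesc_avail() computes how many WQ descriptors are still usable: the configured ring size minus the requests currently outstanding at the firmware. The matches also show the multi-queue branch (q_num > 0) is unsupported and only logs. A sketch of the accounting; the final reserve of one descriptor for everything except an HBA reset is an assumption (it would guarantee a reset can always be posted), as is the trivial snic_select_wq():

        /* Only one WQ today; queue selection is trivial. */
        static int
        snic_select_wq(struct snic *snic)
        {
                return 0;
        }

        static int
        snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
        {
                int nr_wqdesc = snic->config.wq_enet_desc_count;

                if (q_num > 0) {
                        /* Multi-queue would need per-WQ active-request counts. */
                        SNIC_HOST_INFO(snic->shost,
                                       "desc_avail: Multi Queue case.\n");
                        return -1;
                }

                /* Free descriptors = ring size - requests active in firmware. */
                nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

                /* Assumed reserve: hold one slot back unless this is an HBA
                 * reset, so a reset can always reach the firmware. */
                return (req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc
                                                        : nr_wqdesc - 1;
        }
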
149 snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
153 struct snic_fw_stats *fwstats = &snic->s_stats.fw;
162 pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
163 if (dma_mapping_error(&snic->pdev->dev, pa)) {
164 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
171 q_num = snic_select_wq(snic);
173 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
174 desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
176 dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
178 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
179 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
180 SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
185 snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
191 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
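
snic_queue_wq_desc() is the posting path those availability checks serve: map the request for DMA, pick a queue, and, under wq_lock, either post the descriptor or back out. Note that the error path unmaps before unlocking and bumps wq_alloc_fail. A sketch of the flow; the -ENOMEM returns and the active-request increment after posting are assumptions consistent with the fw.actv_reqs counter read in snic_wqdesc_avail():

        int
        snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
        {
                struct snic_host_req *req = os_buf;
                struct snic_fw_stats *fwstats = &snic->s_stats.fw;
                unsigned long flags;
                dma_addr_t pa;
                int desc_avail, q_num;

                pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(&snic->pdev->dev, pa)) {
                        SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
                        return -ENOMEM;
                }

                q_num = snic_select_wq(snic);

                spin_lock_irqsave(&snic->wq_lock[q_num], flags);
                desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
                if (desc_avail <= 0) {
                        /* Back out: unmap before dropping the lock. */
                        dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
                        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
                        atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
                        SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
                        return -ENOMEM;
                }

                snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
                atomic64_inc(&fwstats->actv_reqs);      /* assumed bookkeeping */
                spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

                return 0;
        }
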
200 * snic_handle_untagged_req: Adds snic-specific requests to spl_cmd_list.
204 snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
210 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
211 list_add_tail(&rqi->list, &snic->spl_cmd_list);
212 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
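
The function is small enough that the three matched lines are nearly the whole body: untagged (driver-internal) requests are appended to spl_cmd_list under spl_cmd_lock so they can be found and reclaimed later. A sketch, with only the INIT_LIST_HEAD() assumed:

        void
        snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
        {
                unsigned long flags;

                INIT_LIST_HEAD(&rqi->list);     /* assumed: makes later
                                                 * list_empty() checks valid */

                spin_lock_irqsave(&snic->spl_cmd_lock, flags);
                list_add_tail(&rqi->list, &snic->spl_cmd_list);
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
        }
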
220 snic_req_init(struct snic *snic, int sg_cnt)
228 rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
230 atomic64_inc(&snic->s_stats.io.alloc_fail);
231 SNIC_HOST_ERR(snic->shost,
232 "Failed to allocate memory from snic req pool id = %d\n",
242 rqi->snic = snic;
251 if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
252 atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
255 atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
263 SNIC_SCSI_DBG(snic->shost, "Req_alloc: rqi = %p allocated.\n", rqi);
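
snic_req_init() allocates a request from one of several mempools, chosen by scatter-gather count, using GFP_ATOMIC since it can run in I/O context; failures and SGL sizes feed the io stats. A condensed sketch; the pool-selection threshold and any rqi fields beyond what the matches show (start_time, rq_pool_type) are assumptions:

        struct snic_req_info *
        snic_req_init(struct snic *snic, int sg_cnt)
        {
                struct snic_req_info *rqi;
                u8 typ;

                /* Assumed pool split: a small-SGL cache and a max-SGL cache. */
                typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
                        SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

                rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
                if (!rqi) {
                        atomic64_inc(&snic->s_stats.io.alloc_fail);
                        SNIC_HOST_ERR(snic->shost,
                                      "Failed to allocate memory from snic req pool id = %d\n",
                                      typ);
                        return NULL;
                }

                memset(rqi, 0, sizeof(*rqi));
                rqi->rq_pool_type = typ;   /* remembered for snic_req_free() */
                rqi->start_time = jiffies; /* read by snic_calc_io_process_time() */
                rqi->snic = snic;

                /* SGL statistics: high-water mark plus a per-size histogram. */
                if (sg_cnt > 0) {
                        if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
                                atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
                        atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
                }

                SNIC_SCSI_DBG(snic->shost, "Req_alloc: rqi = %p allocated.\n", rqi);

                return rqi;
        }
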
272 snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
283 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
285 SNIC_HOST_ERR(snic->shost, "abts: Failed to alloc tm req.\n");
303 snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
309 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
311 SNIC_HOST_ERR(snic->shost, "dr: Failed to alloc tm req.\n");
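
snic_abort_req_init() and snic_dr_req_init() share a pattern: task-management requests come from the dedicated SNIC_REQ_TM_CACHE mempool and are attached to the owning rqi. A sketch of the abort variant; the device-reset one would differ only in the field it fills (rqi->dr_req) and its log prefix. The reuse check and the returned pointer are assumptions, not shown in the matches:

        static struct snic_host_req *
        snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
        {
                struct snic_host_req *req;

                /* Assumed: reuse an abort request built by an earlier attempt. */
                if (rqi->abort_req)
                        return rqi->abort_req;

                req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
                if (!req) {
                        SNIC_HOST_ERR(snic->shost, "abts: Failed to alloc tm req.\n");
                        return NULL;
                }

                memset(req, 0, sizeof(*req));
                rqi->abort_req = req;

                return req;
        }
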
328 snic_req_free(struct snic *snic, struct snic_req_info *rqi)
334 SNIC_SCSI_DBG(snic->shost,
340 dma_unmap_single(&snic->pdev->dev,
345 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
350 dma_unmap_single(&snic->pdev->dev,
355 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
359 dma_unmap_single(&snic->pdev->dev,
364 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
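
snic_req_free() is the mirror of the allocations above: each sub-request that was DMA-mapped is unmapped, the task-management requests go back to the TM cache, and the rqi returns to the pool recorded in rq_pool_type. A sketch; the req_pa field holding each saved mapping and the mapped-length choices are assumptions consistent with the dma_unmap_single() calls in the matches:

        void
        snic_req_free(struct snic *snic, struct snic_req_info *rqi)
        {
                SNIC_SCSI_DBG(snic->shost,
                              "Req_free: rqi %p: ioreq %p: abt %p: dr %p\n",
                              rqi, rqi->req, rqi->abort_req, rqi->dr_req);

                /* Abort TM request: unmap if it was posted, then recycle. */
                if (rqi->abort_req) {
                        if (rqi->abort_req->req_pa) /* req_pa: assumed saved addr */
                                dma_unmap_single(&snic->pdev->dev,
                                                 rqi->abort_req->req_pa,
                                                 sizeof(struct snic_host_req),
                                                 DMA_TO_DEVICE);
                        mempool_free(rqi->abort_req,
                                     snic->req_pool[SNIC_REQ_TM_CACHE]);
                }

                /* Device-reset TM request: same treatment. */
                if (rqi->dr_req) {
                        if (rqi->dr_req->req_pa)
                                dma_unmap_single(&snic->pdev->dev,
                                                 rqi->dr_req->req_pa,
                                                 sizeof(struct snic_host_req),
                                                 DMA_TO_DEVICE);
                        mempool_free(rqi->dr_req,
                                     snic->req_pool[SNIC_REQ_TM_CACHE]);
                }

                /* The main request, then the rqi back to its original pool. */
                if (rqi->req->req_pa)
                        dma_unmap_single(&snic->pdev->dev,
                                         rqi->req->req_pa,
                                         rqi->req_len,
                                         DMA_TO_DEVICE);

                mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
        }
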
368 snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
374 dma_unmap_single(&snic->pdev->dev,
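
snic_pci_unmap_rsp_buf() handles the one mapping snic_req_free() does not: the response buffer, mapped in the device-to-host direction. A small sketch; the SG-descriptor lookup is assumed (req_to_sgl() is a hypothetical accessor), since the matches show only the unmap call:

        void
        snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
        {
                /* Assumed: the first SG descriptor of the request addresses
                 * the response buffer. */
                struct snic_sg_desc *sgd = req_to_sgl(rqi->req);

                dma_unmap_single(&snic->pdev->dev,
                                 le64_to_cpu(sgd[0].addr),
                                 le32_to_cpu(sgd[0].len),
                                 DMA_FROM_DEVICE);
        }
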
384 snic_free_all_untagged_reqs(struct snic *snic)
390 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
391 list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
395 snic_pci_unmap_rsp_buf(snic, rqi);
400 snic_req_free(snic, rqi);
402 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
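
On teardown, snic_free_all_untagged_reqs() drains spl_cmd_list in one pass; list_for_each_safe() is needed because each node is deleted while walking. A sketch, with the list_entry()/list_del_init() pair assumed between the matched lines:

        void
        snic_free_all_untagged_reqs(struct snic *snic)
        {
                struct snic_req_info *rqi;
                struct list_head *cur, *nxt;
                unsigned long flags;

                spin_lock_irqsave(&snic->spl_cmd_lock, flags);
                list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
                        rqi = list_entry(cur, struct snic_req_info, list);
                        list_del_init(&rqi->list);

                        snic_pci_unmap_rsp_buf(snic, rqi);
                        snic_req_free(snic, rqi);
                }
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
        }
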
409 snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
413 spin_lock_irqsave(&snic->snic_lock, flags);
414 if (snic->in_remove) {
415 spin_unlock_irqrestore(&snic->snic_lock, flags);
418 spin_unlock_irqrestore(&snic->snic_lock, flags);
420 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
422 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
426 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
427 snic_req_free(snic, rqi);
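
snic_release_untagged_req() frees a single untagged request, but only when the adapter is not mid-removal: snic_lock guards the in_remove flag, and spl_cmd_lock guards the unlink. The early return when the rqi is no longer on the list (already reclaimed elsewhere) is inferred from the lock/unlock pairing in the matches:

        void
        snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
        {
                unsigned long flags;

                /* During removal, snic_free_all_untagged_reqs() owns cleanup. */
                spin_lock_irqsave(&snic->snic_lock, flags);
                if (snic->in_remove) {
                        spin_unlock_irqrestore(&snic->snic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&snic->snic_lock, flags);

                spin_lock_irqsave(&snic->spl_cmd_lock, flags);
                if (list_empty(&rqi->list)) {   /* already reclaimed */
                        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
                        return;
                }
                list_del_init(&rqi->list);
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

                snic_req_free(snic, rqi);
        }
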
547 snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
553 if (duration > atomic64_read(&snic->s_stats.io.max_time))
554 atomic64_set(&snic->s_stats.io.max_time, duration);
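
Finally, snic_calc_io_process_time() keeps a running maximum of per-I/O latency in jiffies, using the start_time stamped in snic_req_init(); only the compare-and-set is in the matches, the subtraction is assumed. The read-then-set pair is not atomic as a whole, which is acceptable for a debug high-water mark:

        static void
        snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
        {
                u64 duration = jiffies - rqi->start_time; /* assumed stamp */

                /* Racy high-water mark: good enough for statistics. */
                if (duration > atomic64_read(&snic->s_stats.io.max_time))
                        atomic64_set(&snic->s_stats.io.max_time, duration);
        }
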