/linux/drivers/gpu/drm/nouveau/nvkm/falcon/
msgq.c
     26  nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
     28      spin_lock(&msgq->lock);
     29      msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
     33  nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
     35      struct nvkm_falcon *falcon = msgq->qmgr->falcon;
     38      nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);
     40      spin_unlock(&msgq->lock);
     44  nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
     52  nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
     77  nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
    115  nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
    140  nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
    154  nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
    177  nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq, u32 index, u32 offset, u32 size)
    193  struct nvkm_falcon_msgq *msgq = *pmsgq;  in nvkm_falcon_msgq_del() (local)
    204  struct nvkm_falcon_msgq *msgq = *pmsgq;  in nvkm_falcon_msgq_new() (local)
    [all...]
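The open/close pair above brackets every access to the falcon message queue: open takes the lock and snapshots the hardware tail register into msgq->position, and close writes the cursor back only when the caller commits what it consumed. A minimal user-space sketch of that protocol, with the MMIO accessors and spinlock replaced by plain fields (all names here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simulated stand-ins for the falcon's MMIO tail register and the
     * queue state; in the driver these are nvkm_falcon_rd32/wr32 on
     * msgq->tail_reg, done under msgq->lock. */
    struct sim_msgq {
            uint32_t tail_reg;   /* "hardware" tail pointer */
            uint32_t position;   /* software read cursor, valid while open */
    };

    static void msgq_open(struct sim_msgq *q)
    {
            /* the driver takes a spinlock here, then snapshots the tail */
            q->position = q->tail_reg;
    }

    static void msgq_close(struct sim_msgq *q, bool commit)
    {
            /* publish the new read position only if the caller consumed data */
            if (commit)
                    q->tail_reg = q->position;
            /* the driver drops the spinlock here */
    }

    int main(void)
    {
            struct sim_msgq q = { .tail_reg = 0x40 };

            msgq_open(&q);
            q.position += 0x20;    /* pretend a 0x20-byte message was consumed */
            msgq_close(&q, true);  /* commit: the tail register now reflects it */
            printf("tail=0x%x\n", q.tail_reg);
            return 0;
    }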
/linux/drivers/scsi/arm/
msgqueue.c
     17  * Function: struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq)
     19  * Params : msgq - message queue to claim entry for
     22  static struct msgqueue_entry *mqe_alloc(MsgQueue_t *msgq)
     26      if ((mq = msgq->free) != NULL)
     27          msgq->free = mq->next;
     33  * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
     35  * Params : msgq - message queue to free entry from
     38  static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
     41      mq->next = msgq->free;
     42      msgq->free = mq;
     51  msgqueue_initialise(MsgQueue_t *msgq)
     70  msgqueue_free(MsgQueue_t *msgq)
     80  msgqueue_msglength(MsgQueue_t *msgq)
     98  msgqueue_getmsg(MsgQueue_t *msgq, int msgno)
    115  msgqueue_addmsg(MsgQueue_t *msgq, int length, ...)
    148  msgqueue_flush(MsgQueue_t *msgq)
    [all...]
msgqueue.h
     32  * Function: void msgqueue_initialise(MsgQueue_t *msgq)
     34  * Params : msgq - queue to initialise
     36  extern void msgqueue_initialise(MsgQueue_t *msgq);
     39  * Function: void msgqueue_free(MsgQueue_t *msgq)
     41  * Params : msgq - queue to free
     43  extern void msgqueue_free(MsgQueue_t *msgq);
     46  * Function: int msgqueue_msglength(MsgQueue_t *msgq)
     48  * Params : msgq - queue to examine
     51  extern int msgqueue_msglength(MsgQueue_t *msgq);
     54  * Function: struct message *msgqueue_getmsg(MsgQueue_t *msgq, int msgno)
    [all...]
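The two files above implement and export a small free-list message queue: mqe_free() pushes an entry onto msgq->free and mqe_alloc() pops it back, so entries are recycled rather than allocated per message. A self-contained sketch of that pattern under simplified, invented types (the real MsgQueue_t preallocates a fixed array of entries and never calls malloc):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            struct entry *next;
            int length;
            unsigned char msg[8];
    };

    struct queue {
            struct entry *qe;    /* queued messages, oldest first */
            struct entry *free;  /* recycled entries */
    };

    static struct entry *mqe_alloc(struct queue *q)
    {
            struct entry *e = q->free;

            if (e)
                    q->free = e->next;       /* recycle a freed entry */
            else
                    e = malloc(sizeof(*e));  /* sketch only; see note above */
            return e;
    }

    static void mqe_free(struct queue *q, struct entry *e)
    {
            if (e) {
                    e->next = q->free;       /* push back onto the free list */
                    q->free = e;
            }
    }

    static int addmsg(struct queue *q, int length, ...)
    {
            struct entry *e = mqe_alloc(q), **tail;
            va_list ap;
            int i;

            if (!e)
                    return 0;
            if (length > (int)sizeof(e->msg)) {
                    mqe_free(q, e);
                    return 0;
            }

            va_start(ap, length);            /* variadic, like msgqueue_addmsg() */
            for (i = 0; i < length; i++)
                    e->msg[i] = (unsigned char)va_arg(ap, int);
            va_end(ap);
            e->length = length;
            e->next = NULL;

            for (tail = &q->qe; *tail; tail = &(*tail)->next)
                    ;                        /* append at the tail */
            *tail = e;
            return length;
    }

    int main(void)
    {
            struct queue q = { 0 };

            addmsg(&q, 3, 0x01, 0x02, 0x03);
            printf("first byte: %#x\n", q.qe->msg[0]);
            mqe_free(&q, q.qe);              /* entry returns to the free list */
            q.qe = NULL;
            return 0;
    }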
/linux/drivers/net/ethernet/brocade/bna/
bfa_msgq.c
     93  bfa_wc_down(&cmdq->msgq->init_wc);  in cmdq_sm_init_wait_entry()
    195  if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,  in bfa_msgq_cmdq_dbell()
    294  if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,  in bfa_msgq_cmdq_copy_rsp()
    301  bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
    305  cmdq->msgq = msgq;  in bfa_msgq_cmdq_attach()
    356  bfa_wc_down(&rspq->msgq->init_wc);  in rspq_sm_init_wait_entry()
    403  if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))  in rspq_sm_dbell_wait_entry()
    451  if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,  in bfa_msgq_rspq_dbell()
    489  bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
    497  bfa_msgq_init_rsp(struct bfa_msgq *msgq, struct bfi_mbmsg *mb)
    507  struct bfa_msgq *msgq = (struct bfa_msgq *)arg;  in bfa_msgq_init() (local)
    526  struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;  in bfa_msgq_isr() (local)
    553  struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;  in bfa_msgq_notify() (local)
    588  bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
    601  bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
    614  bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, bfa_msgq_mcfunc_t cbfn, void *cbarg)
    622  bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
    635  bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
    [all...]
bfa_msgq.h
     77  struct bfa_msgq *msgq;  (member)
    104  struct bfa_msgq *msgq;  (member)
    119  void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa);
    120  void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc);
    121  void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
    123  void bfa_msgq_cmd_post(struct bfa_msgq *msgq,
    125  void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len);
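The prototypes above, together with the bna_enet.c call sites below, give the setup order: bfa_msgq_attach() binds the queue pair to an IOC, bfa_msgq_memclaim() hands it DMA-able memory, and bfa_msgq_regisr() installs a per-message-class response handler that replies to bfa_msgq_cmd_post() commands eventually reach. A self-contained sketch of just the class-based dispatch, with simplified stand-in types rather than the driver's:

    #include <stdio.h>

    /* The driver keys handlers by enum bfi_mclass and dispatches from
     * its response-queue ISR; this miniature keeps only that shape. */
    #define MC_MAX 8

    typedef void (*mcfunc_t)(void *cbarg, unsigned char *msg, int len);

    struct msgq {
            mcfunc_t cbfn[MC_MAX];
            void *cbarg[MC_MAX];
    };

    static void msgq_regisr(struct msgq *q, int mc, mcfunc_t fn, void *arg)
    {
            q->cbfn[mc] = fn;      /* one handler per message class */
            q->cbarg[mc] = arg;
    }

    static void msgq_dispatch(struct msgq *q, unsigned char *msg, int len)
    {
            int mc = msg[0];       /* class byte from the message header */

            if (mc < MC_MAX && q->cbfn[mc])
                    q->cbfn[mc](q->cbarg[mc], msg, len);
    }

    static void enet_rsp_handler(void *cbarg, unsigned char *msg, int len)
    {
            (void)cbarg;
            (void)msg;
            printf("enet response, %d bytes\n", len);
    }

    int main(void)
    {
            struct msgq q = { { 0 } };
            unsigned char rsp[] = { 2 /* hypothetical class id */, 0, 0 };

            msgq_regisr(&q, 2, enet_rsp_handler, NULL);
            msgq_dispatch(&q, rsp, (int)sizeof(rsp));
            return 0;
    }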
bna_enet.c
    418  bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);  in bna_bfi_ethport_admin_up()
    435  bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);  in bna_bfi_ethport_admin_down()
    456  bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);  in bna_bfi_ethport_lpbk_up()
    473  bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);  in bna_bfi_ethport_lpbk_down()
   1171  bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);  in bna_bfi_pause_set()
   1630  bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);  in bna_bfi_attr_get()
   1729  bfa_msgq_attach(&bna->msgq, &ioceth->ioc);  in bna_ioceth_init()
   1730  bfa_msgq_memclaim(&bna->msgq, kva, dma);  in bna_ioceth_init()
   1731  bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);  in bna_ioceth_init()
   1858  bfa_msgq_cmd_post(&bna->msgq,  in bna_bfi_stats_get()
   [all...]
bna_tx_rx.c
    190  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_ucast_req()
    206  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_mcast_add_req()
    222  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_mcast_del_req()
    237  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_mcast_filter_req()
    252  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_rx_promisc_req()
    277  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_rx_vlan_filter_set()
    292  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_vlan_strip_enable()
    308  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_rit_cfg()
    328  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);  in bna_bfi_rss_cfg()
    343  bfa_msgq_cmd_post(&rxf->rx->bna->msgq,  in bna_bfi_rss_enable()
    [all...]
bna_types.h
    928  struct bfa_msgq msgq;  (member)
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/
rpc.c
     81  * back to the msgq. The result is also formed as split elements.
    144  u32 size, rptr = *gsp->msgq.rptr;  in r535_gsp_msgq_wait()
    149  if (WARN_ON(!size || size >= gsp->msgq.cnt))  in r535_gsp_msgq_wait()
    153  u32 wptr = *gsp->msgq.wptr;  in r535_gsp_msgq_wait()
    155  used = wptr + gsp->msgq.cnt - rptr;  in r535_gsp_msgq_wait()
    156  if (used >= gsp->msgq.cnt)  in r535_gsp_msgq_wait()
    157  used -= gsp->msgq.cnt;  in r535_gsp_msgq_wait()
    173  u32 rptr = *gsp->msgq.rptr;  in r535_gsp_msgq_get_entry()
    176  return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +  in r535_gsp_msgq_get_entry()
    237  u32 rptr = *gsp->msgq.rptr;  in r535_gsp_msgq_recv_one_elem()
    [all...]
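r535_gsp_msgq_wait() polls the shared-memory write pointer until enough data is queued, computing occupancy as used = wptr + cnt - rptr and folding it back into range when it overshoots. A minimal sketch of that wraparound arithmetic (simulated indices only; the driver reads rptr and wptr from shared memory written by the GSP):

    #include <stdint.h>
    #include <stdio.h>

    /* How many entries are readable in a ring of `cnt` slots, given
     * read/write indices already reduced mod cnt. Mirrors the used-space
     * computation shown above. */
    static uint32_t ring_used(uint32_t rptr, uint32_t wptr, uint32_t cnt)
    {
            uint32_t used = wptr + cnt - rptr;

            if (used >= cnt)
                    used -= cnt;
            return used;
    }

    int main(void)
    {
            printf("%u\n", ring_used(2, 7, 16));   /* no wrap: 5 */
            printf("%u\n", ring_used(14, 3, 16));  /* wrapped:  5 */
            printf("%u\n", ring_used(9, 9, 16));   /* empty:    0 */
            return 0;
    }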
gsp.c
     51  struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);  in r535_gsp_msgq_work() (local)
     54  if (*gsp->msgq.rptr != *gsp->msgq.wptr)  in r535_gsp_msgq_work()
     76  schedule_work(&gsp->msgq.work);  in r535_gsp_intr()
    303  INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);  in r535_gsp_postinit()
   1136  } *cmdq, *msgq;  in r535_gsp_shared_init() (local)
   1140  gsp->shm.msgq.size = 0x40000;  in r535_gsp_shared_init()
   1142  gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;  in r535_gsp_shared_init()
   1148  gsp->shm.msgq.size,  in r535_gsp_shared_init()
   1155  gsp->shm.msgq...  in r535_gsp_shared_init()
   [all...]
/linux/drivers/misc/bcm-vk/
bcm_vk_msg.c
     87  static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
     92      wr_idx = readl_relaxed(&msgq->wr_idx);  in msgq_occupied()
     93      rd_idx = readl_relaxed(&msgq->rd_idx);  in msgq_occupied()
     99  u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
    102      return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);  in msgq_avail_space()
    410  struct bcm_vk_msgq __iomem *msgq;  in bcm_vk_sync_msgq() (local)
    422  * the msgq-info may not be available until a later time. In ...  in bcm_vk_sync_msgq()
    427  dev_info(dev, "BAR1 msgq marker not initialized.\n");  in bcm_vk_sync_msgq()
    437  "Advertised msgq %d error - max %d allowed\n",  in bcm_vk_sync_msgq()
    445  /* first msgq location */  in bcm_vk_sync_msgq()
    546  struct bcm_vk_msgq __iomem *msgq;  in bcm_vk_append_ib_sgl() (local)
    600  struct bcm_vk_msgq __iomem *msgq;  in bcm_to_v_msg_enqueue() (local)
    782  struct bcm_vk_msgq __iomem *msgq;  in bcm_to_h_msg_dequeue() (local)
   1080  struct bcm_vk_msgq __iomem *msgq;  in bcm_vk_write() (local)
   [all...]
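msgq_occupied() and msgq_avail_space() above show the companion convention to the GSP ring math: one slot is always kept in reserve, so free space is q_size minus occupancy minus one, and wr_idx == rd_idx unambiguously means empty rather than full. A sketch of both helpers, assuming a power-of-two queue size reduced by mask (the driver's exact reduction is not visible in the excerpt):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t occupied(uint32_t wr_idx, uint32_t rd_idx, uint32_t q_size)
    {
            /* assumes q_size is a power of two */
            return (wr_idx - rd_idx) & (q_size - 1);
    }

    static uint32_t avail_space(uint32_t wr_idx, uint32_t rd_idx, uint32_t q_size)
    {
            /* one slot stays reserved so "full" never looks like "empty" */
            return q_size - occupied(wr_idx, rd_idx, q_size) - 1;
    }

    int main(void)
    {
            printf("occupied=%u avail=%u\n",
                   occupied(5, 2, 16), avail_space(5, 2, 16));  /* 3 and 12 */
            return 0;
    }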
bcm_vk_msg.h
     39  * Structure to record static info from the msgq sync. We keep local copy
    133  /* Mutex to access msgq */
    136  struct bcm_vk_msgq __iomem *msgq[VK_MSGQ_MAX_NR];  (member)
/linux/drivers/gpu/drm/nouveau/nvkm/engine/sec2/
gp102.c
    127  ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));  in gp102_sec2_initmsg()
    137  nvkm_falcon_msgq_init(sec2->msgq,  in gp102_sec2_initmsg()
    172  if (!nvkm_falcon_msgq_empty(sec2->msgq))  in gp102_sec2_intr()
    173  nvkm_falcon_msgq_recv(sec2->msgq);  in gp102_sec2_intr()
    215  .msgq = { 0xa30, 0xa34, 8 },
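gp102_sec2_initmsg() shows the bring-up handshake these excerpts share with the PMU entries below: the firmware's first message describes where each queue lives, and the driver feeds that description back into nvkm_falcon_msgq_init(). The { 0xa30, 0xa34, 8 } tuple is plausibly the head/tail register pair plus a per-queue stride, though the excerpt does not show the field names. A sketch of the handshake with an invented descriptor layout (the real data arrives in the firmware's init message, cf. msg.queue_info[...] in gm20b.c):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout standing in for one firmware queue descriptor. */
    struct queue_info {
            uint32_t index;   /* which hardware queue slot */
            uint32_t offset;  /* queue start within falcon memory */
            uint32_t size;    /* queue size in bytes */
    };

    struct msgq_cfg {
            uint32_t index, offset, size;
    };

    static void msgq_init(struct msgq_cfg *q, const struct queue_info *qi)
    {
            q->index = qi->index;
            q->offset = qi->offset;
            q->size = qi->size;
    }

    int main(void)
    {
            /* pretend this arrived as the firmware's init message */
            struct queue_info qi = { .index = 4, .offset = 0x1000, .size = 0x100 };
            struct msgq_cfg q;

            msgq_init(&q, &qi);
            printf("msgq at %#x, %#x bytes\n", q.offset, q.size);
            return 0;
    }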
base.c
    118  nvkm_falcon_msgq_del(&sec2->msgq);  in nvkm_sec2_dtor()
    160  (ret = nvkm_falcon_msgq_new(sec2->qmgr, "msgq", &sec2->msgq)))  in nvkm_sec2_new_()
tu102.c
     45  .msgq = { 0xc80, 0xc84, 8 },
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
gm20b.c
    164  ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));  in gm20b_pmu_initmsg()
    178  nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,  in gm20b_pmu_initmsg()
    197  nvkm_falcon_msgq_recv(pmu->msgq);  in gm20b_pmu_recv()
base.c
    108  nvkm_falcon_msgq_del(&pmu->msgq);  in nvkm_pmu_dtor()
    152  (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))  in nvkm_pmu_ctor()
gp102.c
     41  .msgq = { 0x4c8, 0x4cc, 0 },
gm200.c
     58  .msgq = { 0x4c8, 0x4cc, 0 },
/linux/drivers/gpu/drm/nouveau/include/nvkm/engine/
sec2.h
     18  struct nvkm_falcon_msgq *msgq;  (member)
falcon.h
     89  } cmdq, msgq;  (member)
/linux/drivers/isdn/mISDN/
stack.c
     28  skb_queue_tail(&st->msgq, skb);  in _queue_message()
    221  skb = skb_dequeue(&st->msgq);  in mISDNStackd()
    226  skb = skb_dequeue(&st->msgq);  in mISDNStackd()
    266  if (!skb_queue_empty(&st->msgq))  in mISDNStackd()
    315  skb_queue_purge(&st->msgq);  in mISDNStackd()
    379  skb_queue_head_init(&newst->msgq);  in create_stack()
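stack.c uses the classic kernel-thread drain pattern: mISDNStackd() calls skb_dequeue() until the queue is empty, re-checks skb_queue_empty() before sleeping so a message queued in between is not missed, and skb_queue_purge() discards leftovers at teardown. A user-space analogue of that loop using pthreads (all names invented for the sketch; the kernel thread sleeps via its own wait machinery, not a condition variable):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct msg { struct msg *next; int id; };

    static struct msg *head, **tail = &head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
    static int stopping;

    static void queue_message(int id)          /* plays _queue_message() */
    {
            struct msg *m = malloc(sizeof(*m));

            m->id = id;
            m->next = NULL;
            pthread_mutex_lock(&lock);
            *tail = m;
            tail = &m->next;
            pthread_cond_signal(&nonempty);
            pthread_mutex_unlock(&lock);
    }

    static void *stackd(void *arg)             /* plays mISDNStackd() */
    {
            (void)arg;
            for (;;) {
                    struct msg *m;

                    pthread_mutex_lock(&lock);
                    /* re-check under the lock: plays skb_queue_empty() */
                    while (!head && !stopping)
                            pthread_cond_wait(&nonempty, &lock);
                    if (!head) {               /* stopping and fully drained */
                            pthread_mutex_unlock(&lock);
                            return NULL;
                    }
                    m = head;                  /* plays skb_dequeue() */
                    head = m->next;
                    if (!head)
                            tail = &head;
                    pthread_mutex_unlock(&lock);

                    printf("handled message %d\n", m->id);
                    free(m);
            }
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, stackd, NULL);
            queue_message(1);
            queue_message(2);

            pthread_mutex_lock(&lock);
            stopping = 1;                      /* teardown request */
            pthread_cond_signal(&nonempty);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return 0;
    }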
/linux/drivers/gpu/drm/nouveau/include/nvkm/subdev/
pmu.h
     15  struct nvkm_falcon_msgq *msgq;  (member)
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/
gsp.c
    193  (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;  in r570_gsp_set_rmargs()
/linux/include/linux/
mISDNif.h
    511  struct sk_buff_head msgq;  (member)