/linux/drivers/infiniband/hw/mana/
wr.c
     10  static int mana_ib_post_recv_ud(struct mana_ib_qp *qp, const struct ib_recv_wr *wr)
     23          if (wr->num_sge > MAX_WR_SGL_NUM)
     26          for (i = 0; i < wr->num_sge; ++i) {
     27                  gdma_sgl[i].address = wr->sg_list[i].addr;
     28                  gdma_sgl[i].mem_key = wr->sg_list[i].lkey;
     29                  gdma_sgl[i].size = wr->sg_list[i].length;
     31          wqe_req.num_sge = wr->num_sge;
     41          shadow_wqe->header.wr_id = wr->wr_id;
     49  int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
     55          for (; wr; wr = wr->next) {
    [all …]
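The excerpt shows the receive half of the verbs posting contract: walk the caller's chained ib_recv_wr list, bounds-check num_sge, copy each scatter/gather entry into the device's native descriptor, and save wr_id for completion matching. A minimal userspace sketch of the same contract using libibverbs; post_one_recv and its buffer parameters are illustrative, not taken from the driver:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Hypothetical helper: post one receive buffer on a QP.  The three
     * sge fields mirror the gdma_sgl copy loop above (addr, lkey, length). */
    static int post_one_recv(struct ibv_qp *qp, void *buf, uint32_t len,
                             uint32_t lkey, uint64_t wr_id)
    {
            struct ibv_sge sge = {
                    .addr   = (uintptr_t)buf,
                    .length = len,
                    .lkey   = lkey,
            };
            struct ibv_recv_wr wr = {
                    .wr_id   = wr_id,  /* echoed back in the completion entry */
                    .sg_list = &sge,
                    .num_sge = 1,
                    .next    = NULL,   /* providers walk this: for (; wr; wr = wr->next) */
            };
            struct ibv_recv_wr *bad_wr;

            /* On error, *bad_wr points at the first WR that was not consumed. */
            return ibv_post_recv(qp, &wr, &bad_wr);
    }

As in the kernel path, a failed post leaves *bad_wr pointing at the first work request that was not consumed, so the caller can tell how far the chain got.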
/linux/arch/arm64/kvm/
at.c
     13  static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
     15          wr->fst = fst;
     16          wr->ptw = s1ptw;
     17          wr->s2 = s1ptw;
     18          wr->failed = true;
     99                     struct s1_walk_result *wr, u64 va)        in setup_s1_walk()
    161          wr->level = S1_MMU_DISABLED;
    168          wr->level = S1_MMU_DISABLED;
    172          if (wr->level == S1_MMU_DISABLED) {
    176                  wr->pa = va;
    [all …]
/linux/lib/
decompress_unlzma.c
    294  static inline size_t INIT get_pos(struct writer *wr)
    297          wr->global_pos + wr->buffer_pos;
    300  static inline uint8_t INIT peek_old_byte(struct writer *wr,
    303          if (!wr->flush) {
    305                  while (offs > wr->header->dict_size)
    306                          offs -= wr->header->dict_size;
    307                  pos = wr->buffer_pos - offs;
    308                  return wr->buffer[pos];
    310                  uint32_t pos = wr->buffer_pos - offs;
    311                  while (pos >= wr->header->dict_size)
    [all …]
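peek_old_byte() reaches back offs bytes into a dictionary that is reused circularly once it fills. A self-contained sketch of that wraparound arithmetic, assuming (as the kernel code does) 32-bit indices and offs no larger than dict_size; peek_back and its parameters are stand-ins for the writer fields above:

    #include <stdint.h>

    /* Read the byte 'offs' positions behind 'pos' in a circular dictionary.
     * Mirrors the flush branch above: the subtraction may wrap below zero
     * in unsigned arithmetic, and because offs <= dict_size, at most one
     * += folds the index back into [0, dict_size). */
    static uint8_t peek_back(const uint8_t *buf, uint32_t dict_size,
                             uint32_t pos, uint32_t offs)
    {
            uint32_t i = pos - offs;   /* uint32_t arithmetic wraps mod 2^32 */

            while (i >= dict_size)
                    i += dict_size;
            return buf[i];
    }

An LZMA match copy calls a lookup like this once per output byte with offs equal to the match distance, which is why the kernel keeps the correction branch this light.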
/linux/tools/bpf/bpftool/
json_writer.c
    311          json_writer_t *wr = jsonw_new(stdout);        in main()
    313          jsonw_start_object(wr);
    314          jsonw_pretty(wr, true);
    315          jsonw_name(wr, "Vyatta");
    316          jsonw_start_object(wr);
    317          jsonw_string_field(wr, "url", "http://vyatta.com");
    318          jsonw_uint_field(wr, "downloads", 2000000ul);
    319          jsonw_float_field(wr, "stock", 8.16);
    321          jsonw_name(wr, "ARGV");
    322          jsonw_start_array(wr);
    [all …]
/linux/tools/testing/selftests/bpf/
json_writer.c
    311          json_writer_t *wr = jsonw_new(stdout);        in main()
    313          jsonw_start_object(wr);
    314          jsonw_pretty(wr, true);
    315          jsonw_name(wr, "Vyatta");
    316          jsonw_start_object(wr);
    317          jsonw_string_field(wr, "url", "http://vyatta.com");
    318          jsonw_uint_field(wr, "downloads", 2000000ul);
    319          jsonw_float_field(wr, "stock", 8.16);
    321          jsonw_name(wr, "ARGV");
    322          jsonw_start_array(wr);
    [all …]
/linux/drivers/infiniband/hw/mlx5/
wr.c
      9  #include "wr.h"
     54  static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
     61          if (wr->send_flags & IB_SEND_IP_CSUM)
     65          if (wr->opcode == IB_WR_LSO) {
     66                  struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
    103                          const struct ib_send_wr *wr)        in set_datagram_seg()
    105          memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
    107                  cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
    108          dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
    228  static __be32 send_ieth(const struct ib_send_wr *wr)
    [all …]
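set_datagram_seg() packs the three pieces of UD addressing (address handle, remote QPN, remote Q_Key) into the hardware WQE. In userspace verbs the same triple rides in the wr.ud union of the send work request; a hedged libibverbs sketch, with post_ud_send purely illustrative:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    static int post_ud_send(struct ibv_qp *qp, struct ibv_ah *ah,
                            uint32_t remote_qpn, uint32_t remote_qkey,
                            struct ibv_sge *sge)
    {
            struct ibv_send_wr wr = {
                    .opcode     = IBV_WR_SEND,
                    .sg_list    = sge,
                    .num_sge    = 1,
                    .send_flags = IBV_SEND_SIGNALED,
            };
            struct ibv_send_wr *bad_wr;

            wr.wr.ud.ah          = ah;          /* cf. the av memcpy at line 105 */
            wr.wr.ud.remote_qpn  = remote_qpn;  /* cf. line 107                  */
            wr.wr.ud.remote_qkey = remote_qkey; /* cf. line 108                  */

            return ibv_post_send(qp, &wr, &bad_wr);
    }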
gsi.c
     51          struct mlx5_ib_gsi_wr *wr;        in generate_completions()
     56          wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
     58          if (!wr->completed)
     61          WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
     62          wr->completed = false;
     71          struct mlx5_ib_gsi_wr *wr =        in handle_single_completion()
     78          wr->completed = true;
     79          wr_id = wr->wc.wr_id;
     80          wr->wc = *wc;
     81          wr->wc.wr_id = wr_id;
    [all …]
/linux/include/trace/events/
ib_mad.h
     24          TP_PROTO(struct ib_mad_send_wr_private *wr,
     26          TP_ARGS(wr, qp_info),
     55                  __entry->dev_index = wr->mad_agent_priv->agent.device->index;
     56                  __entry->port_num = wr->mad_agent_priv->agent.port_num;
     57                  __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
     58                  __entry->agent_priv = wr->mad_agent_priv;
     59                  __entry->wrtid = wr->tid;
     60                  __entry->max_retries = wr->max_retries;
     61                  __entry->retries_left = wr->retries_left;
     62                  __entry->retry = wr->retry;
    [all …]
/linux/drivers/ata/pata_parport/
bpck.c
     93  #define WR(r,v)         bpck_write_regr(pi,2,r,v)
    103          WR(4, 0x40);        in bpck_write_block()
    109          WR(4, 0);
    113          WR(4, 0x50);
    119          WR(4, 0x10);
    123          WR(4, 0x48);
    128          WR(4, 8);
    132          WR(4, 0x48);
    137          WR(4, 8);
    141          WR(4, 0x48);
    [all …]
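The driver defines a local WR(r, v) macro (epia.c just below does the same) so the timing-sensitive command sequences read as bare register writes, with the pi context and chip-select argument frozen once. A toy standalone version of the pattern; write_reg is a hypothetical stand-in for bpck_write_regr():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical register-write helper; a real driver would toggle the
     * parallel-port data/control lines here instead of printing. */
    static void write_reg(int cont, uint8_t reg, uint8_t val)
    {
            printf("cont=%d reg=0x%02x val=0x%02x\n", cont, reg, val);
    }

    #define WR(r, v) write_reg(2, (r), (v))  /* pin down the context argument */

    int main(void)
    {
            WR(4, 0x40);  /* sequences then read like the bpck_write_block() hits */
            WR(4, 0);
            return 0;
    }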
epia.c
     94  #define WR(r, v)        epia_write_regr(pi, 0, r, v)
    114          WR(0x86, 8);        in epia_connect()
    119          /* WR(0x84,0x10); */        in epia_disconnect()
    167          WR(0x84, 3);        in epia_read_block()
    171          w2(4); WR(0x84, 0);
    175          WR(0x84, 3);
    179          w2(4); WR(0x84, 0);
    183          WR(0x84, 3);
    187          w2(4); WR(0x84, 0);
    215          WR(0x84, 1);        in epia_write_block()
    [all …]
/linux/fs/orangefs/
inode.c
     23          struct orangefs_write_range *wr = NULL;        in orangefs_writepage_locked()
     34          wr = folio->private;
     35          off = wr->pos;
     36          if ((off + wr->len > len) && (off <= len))
     39                  wlen = wr->len;
     41                  wlen = wr->len;
     56                  len, wr, NULL, NULL);
     82          struct orangefs_write_range *wrp, wr;        in orangefs_writepages_work()
    105          wr.uid = ow->uid;
    106          wr.gid = ow->gid;
    [all …]
/linux/drivers/isdn/hardware/mISDN/
ipac.h
    122  #define IPAC_MASKB      0x20    /* WR */
    124  #define IPAC_CMDRB      0x21    /* WR */
    128  #define IPAC_RAH1       0x26    /* WR */
    129  #define IPAC_RAH2       0x27    /* WR */
    132  #define IPAC_RAL2       0x29    /* WR */
    134  #define IPAC_XBCL       0x2A    /* WR */
    137  #define IPAC_XBCH       0x2D    /* WR */
    139  #define IPAC_RLCR       0x2E    /* WR */
    141  #define IPAC_TSAX       0x30    /* WR */
    142  #define IPAC_TSAR       0x31    /* WR */
    [all …]
/linux/drivers/infiniband/core/
rw.c
     81          reg->inv_wr.next = &reg->reg_wr.wr;        in rdma_rw_inv_key()
    112          reg->reg_wr.wr.opcode = IB_WR_REG_MR;        in rdma_rw_init_one_mr()
    152                  prev->wr.wr.next = &reg->inv_wr;        in rdma_rw_init_mr_wrs()
    154                  prev->wr.wr.next = &reg->reg_wr.wr;
    157          reg->reg_wr.wr.next = &reg->wr.wr;
    159          reg->wr.wr.sg_list = &reg->sge;
    160          reg->wr.wr.num_sge = 1;
    161          reg->wr.remote_addr = remote_addr;
    162          reg->wr.rkey = rkey;
    164          reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
    [all …]
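rdma_rw_init_mr_wrs() threads invalidate, registration, and RDMA WRITE work requests into one chain via the embedded next pointers, so a single post submits the whole sequence in order. A hedged libibverbs sketch of the chaining idea using two RDMA WRITEs (plain userspace verbs has no REG_MR work request, so only the write half is shown; post_write_chain is illustrative):

    #include <stdint.h>
    #include <infiniband/verbs.h>

    static int post_write_chain(struct ibv_qp *qp, struct ibv_sge sge[2],
                                uint64_t remote_addr, uint32_t rkey)
    {
            struct ibv_send_wr second = {
                    .opcode     = IBV_WR_RDMA_WRITE,
                    .sg_list    = &sge[1],
                    .num_sge    = 1,
                    .send_flags = IBV_SEND_SIGNALED,  /* only the tail signals */
            };
            struct ibv_send_wr first = {
                    .opcode  = IBV_WR_RDMA_WRITE,
                    .sg_list = &sge[0],
                    .num_sge = 1,
                    .next    = &second,  /* same linkage as reg_wr.wr.next above */
            };
            struct ibv_send_wr *bad_wr;

            first.wr.rdma.remote_addr  = remote_addr;  /* cf. reg->wr.remote_addr */
            first.wr.rdma.rkey         = rkey;         /* cf. reg->wr.rkey        */
            second.wr.rdma.remote_addr = remote_addr + sge[0].length;
            second.wr.rdma.rkey        = rkey;

            return ibv_post_send(qp, &first, &bad_wr);
    }

Signaling only the tail WR, as here, is the usual way to get a single completion for the whole chain.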
/linux/drivers/staging/media/ipu7/
ipu7-mmu.h
     30  /* IS MMU Cmd WR */
     36  /* IS MMU Data WR Snoop */
     42  /* IS MMU Data WR ISOC */
     54  /* PS MMU FW WR */
     66  /* PS MMU FW Data WR VC0 */
     76  /* IS UAO UC WR */
     80  /* IS UAO M0 WR */
     84  /* IS UAO M1 WR */
     92  /* PS UAO FW WR */
    100  /* PS UAO SRT WR */
    [all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
    647                          const struct ib_reg_wr *wr)        in set_reg_seg()
    649          struct pvrdma_user_mr *mr = to_vmr(wr->mr);
    651          wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
    652          wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
    653          wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
    654          wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
    655          wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
    656          wqe_hdr->wr.fast_reg.access_flags = wr->access;
    657          wqe_hdr->wr.fast_reg.rkey = wr->key;
    666   * @wr: work request list to post
    [all …]
/linux/drivers/infiniband/ulp/iser/
iser_memory.c
    257          struct ib_reg_wr *wr = &tx_desc->reg_wr;        in iser_reg_sig_mr()
    268          iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
    280          memset(wr, 0, sizeof(*wr));
    281          wr->wr.next = &tx_desc->send_wr;
    282          wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
    283          wr->wr.wr_cqe = cqe;
    284          wr->wr.num_sge = 0;
    285          wr->wr.send_flags = 0;
    286          wr->mr = mr;
    287          wr->key = mr->rkey;
    [all …]
/linux/Documentation/driver-api/soundwire/
bra_cadence.rst
     33  + 1 | 0 | ID = 0 | WR HDR[1]    | WR HDR[0]    |
     34  +   |   |        | WR HDR[3]    | WR HDR[2]    |
     35  +   |   |        | WR HDR[5]    | WR HDR[4]    |
     36  +   |   |        | pad          | WR HDR CRC   |
     37  +   |   |        | WR Data[1]   | WR Data[0]   |
     38  +   |   |        | WR Data[3]   | WR Data[2]   |
     39  +   |   |        | WR Data[n-2] | WR Data[n-3] |
     40  +   |   |        | pad          | WR Data[n-1] |
     41  + 0 | 1 |        | pad          | WR Data CRC  |
     56  + 1 | 0 | ID = 0 | pad          | WR Hdr Rsp   |
    [all …]
/linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/
pipeline.json
     27  …issue is stalled and there is an interlock. Stall cycles due to a stall in Wr (typically awaiting …
     30  …issue is stalled and there is an interlock. Stall cycles due to a stall in Wr (typically awaiting …
     33  …r data to calculate the address in the AGU. Stall cycles due to a stall in Wr (typically awaiting …
     36  …r data to calculate the address in the AGU. Stall cycles due to a stall in Wr (typically awaiting …
     39  …that is due to an FPU/NEON instruction. Stall cycles due to a stall in the Wr stage (typically awa…
     42  …that is due to an FPU/NEON instruction. Stall cycles due to a stall in the Wr stage (typically awa…
     45  …to the backend, load.This event counts every cycle there is a stall in the Wr stage due to a load",
     48  … to the backend, load.This event counts every cycle there is a stall in the Wr stage due to a load"
     51  …o the backend, store.This event counts every cycle there is a stall in the Wr stage due to a store…
     54  …o the backend, store.This event counts every cycle there is a stall in the Wr stage due to a store"
    [all …]
/linux/drivers/infiniband/sw/siw/
siw_verbs.c
    683  /* Complete SQ WR's without processing */
    684  static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
    689          while (wr) {
    692                  switch (wr->opcode) {
    722                  sqe.id = wr->wr_id;
    728                          *bad_wr = wr;
    731                  wr = wr->next;
    736  /* Complete RQ WR's without processing */
    737  static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
    743          while (wr) {
    [all …]
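siw_sq_flush_wr()/siw_rq_flush_wr() complete queued work requests without executing them once the QP is in the error state. From the consumer side this surfaces as completions carrying a flush status; a small userspace sketch of that contract, with drain_cq purely illustrative:

    #include <stdio.h>
    #include <infiniband/verbs.h>

    static void drain_cq(struct ibv_cq *cq)
    {
            struct ibv_wc wc;

            while (ibv_poll_cq(cq, 1, &wc) > 0) {
                    if (wc.status == IBV_WC_WR_FLUSH_ERR)
                            /* wc.wr_id carries the id copied in sqe.id = wr->wr_id */
                            fprintf(stderr, "wr %llu flushed, not executed\n",
                                    (unsigned long long)wc.wr_id);
            }
    }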
/linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/
pipeline.json
     39  …on is stalled and there is an interlock. Stall cycles due to a stall in the Wr stage are excluded",
     42  …ion is stalled and there is an interlock. Stall cycles due to a stall in the Wr stage are excluded"
     45  …iting for data to calculate the address. Stall cycles due to a stall in the Wr stage are excluded",
     48  …aiting for data to calculate the address. Stall cycles due to a stall in the Wr stage are excluded"
     51  …ock that is caused by a VPU instruction. Stall cycles due to a stall in the Wr stage are excluded",
     54  …lock that is caused by a VPU instruction. Stall cycles due to a stall in the Wr stage are excluded"
     57  …backend, load. This event counts every cycle where there is a stall in the Wr stage due to a load",
     60  … backend, load. This event counts every cycle where there is a stall in the Wr stage due to a load"
     63  …ackend, store. This event counts every cycle where there is a stall in the Wr stage due to a store…
     66  …ackend, store. This event counts every cycle where there is a stall in the Wr stage due to a store"
    [all …]
/linux/drivers/infiniband/sw/rxe/
rxe_mw.c
     83          if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {        in rxe_check_bind_mw()
    117          if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
    123          if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
    124                       ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
    138          u32 key = wqe->wr.wr.mw.rkey & 0xff;        in rxe_do_bind_mw()
    143          mw->addr = wqe->wr.wr.mw.addr;
    144          mw->length = wqe->wr.wr.mw.length;
    170          u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;        in rxe_bind_mw()
    171          u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
    172          int access = wqe->wr.wr.mw.access;
    [all …]
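rxe_check_bind_mw() rejects a bind whose window is empty, longer than the parent MR, or not fully contained in the MR's virtual range. The same containment test restated as standalone C; mw_range_ok and its parameters are illustrative, and a production version would also guard the additions against overflow:

    #include <stdbool.h>
    #include <stdint.h>

    /* The window [mw_addr, mw_addr + mw_len) must sit entirely inside the
     * memory region's [mr_iova, mr_iova + mr_len). */
    static bool mw_range_ok(uint64_t mw_addr, uint64_t mw_len,
                            uint64_t mr_iova, uint64_t mr_len)
    {
            if (mw_len == 0 || mw_len > mr_len)
                    return false;  /* cf. the checks at lines 83 and 117 */
            if (mw_addr < mr_iova)
                    return false;  /* window starts before the MR        */
            if (mw_addr + mw_len > mr_iova + mr_len)
                    return false;  /* window runs past the end of the MR */
            return true;
    }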
rxe_verbs.c
    509  static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
    518          while (wr) {
    519                  err = post_one_recv(&srq->rq, wr);
    522                  wr = wr->next;
    528          *bad_wr = wr;
    683  /* send wr */
    699          rxe_err_qp(qp, "bad wr opcode for qp type\n");        in validate_send_wr()
    747  static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
    750          wr->wr_id = ibwr->wr_id;
    751          wr->opcode = ibwr->opcode;
    [all …]
/linux/drivers/infiniband/hw/cxgb4/
qp.c
    155   * so no need to post a RESET WR for these EQs.        in destroy_qp()
    415                          const struct ib_send_wr *wr, int max, u32 *plenp)        in build_immd()
    423          for (i = 0; i < wr->num_sge; i++) {
    424                  if ((plen + wr->sg_list[i].length) > max)
    426                  srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
    427                  plen += wr->sg_list[i].length;
    428                  rem = wr->sg_list[i].length;
    490                          const struct ib_send_wr *wr, u8 *len16)        in build_rdma_send()
    496          if (wr->num_sge > T4_MAX_SEND_SGE)
    498          switch (wr->opcode) {
    [all …]
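build_immd() inlines small payloads by copying each SGE's bytes straight into the work-queue entry rather than referencing them by address, giving up once the running total would exceed the immediate-data limit. A standalone sketch of that gather loop; struct sge and gather_inline are stand-ins for ib_sge and the driver helper:

    #include <stdint.h>
    #include <string.h>

    struct sge { uint64_t addr; uint32_t length; };

    static int gather_inline(uint8_t *dst, uint32_t max, const struct sge *sgl,
                             int num_sge, uint32_t *plenp)
    {
            uint32_t plen = 0;
            int i;

            for (i = 0; i < num_sge; i++) {
                    if (plen + sgl[i].length > max)
                            return -1;  /* too large to inline; fall back */
                    memcpy(dst + plen, (const void *)(uintptr_t)sgl[i].addr,
                           sgl[i].length);
                    plen += sgl[i].length;
            }
            *plenp = plen;  /* total immediate payload, as in *plenp above */
            return 0;
    }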
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
   1501                          const struct ib_ud_wr *wr,        in build_mlx_header()
   1511          mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
   1514          err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
   1525          switch (wr->wr.opcode) {
   1533                  sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
   1542          sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
   1547          ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
   1550          sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
   1552          sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
   1553                  sqp->qkey : wr->remote_qkey);
    [all …]
/linux/tools/testing/selftests/breakpoints/
breakpoint_test_arm64.c
     33  static void child(int size, int wr)
     35          volatile uint8_t *addr = &var[32 + wr];
    112  static bool run_test(int wr_size, int wp_size, int wr, int wp)
    125          child(wr_size, wr);
    204          int wr, wp, size;        in main()
    215          for (wr = 0; wr <= 32; wr = wr + size) {
    216                  for (wp = wr - size; wp <= wr + size; wp = wp + size) {
    217                          result = run_test(size, MIN(size, 8), wr, wp);
    218                          if ((result && wr == wp) ||
    219                              (!result && wr != wp))
    [all …]