/linux-5.10/drivers/infiniband/hw/mthca/mthca_cq.c
  174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)    in cqe_sw() argument
  176  return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;    in cqe_sw()
  181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));    in next_cqe_sw()
  184  static inline void set_cqe_hw(struct mthca_cqe *cqe)    in set_cqe_hw() argument
  186  cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;    in set_cqe_hw()
  191  __be32 *cqe = cqe_ptr;    in dump_cqe() local
  193  (void) cqe; /* avoid warning if mthca_dbg compiled away... */    in dump_cqe()
  194  mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",    in dump_cqe()
  195  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),    in dump_cqe()
  196  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),    in dump_cqe()
  [all …]

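The mthca lines above show the simplest CQE handoff scheme: a single owner flag that hardware clears when it writes an entry and that software sets again after consuming it. A minimal sketch of that pattern, with hypothetical names (OWNER_HW, my_cq, next_sw_cqe), not the mthca code itself:

/* Sketch only: generic HW/SW ownership-flag polling in the spirit of
 * cqe_sw()/set_cqe_hw() above. All names are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

#define OWNER_HW 0x80   /* assumed flag: set while hardware still owns the entry */

struct my_cqe {
	uint8_t owner;
	uint8_t payload[31];
};

struct my_cq {
	struct my_cqe *ring;
	unsigned int   size_mask;    /* ring size is a power of two */
	unsigned int   cons_index;
};

/* Return the next entry if software owns it, NULL otherwise. */
static struct my_cqe *next_sw_cqe(struct my_cq *cq)
{
	struct my_cqe *cqe = &cq->ring[cq->cons_index & cq->size_mask];

	return (cqe->owner & OWNER_HW) ? NULL : cqe;
}

/* After consuming, flip ownership back to hardware and advance. */
static void release_cqe(struct my_cq *cq, struct my_cqe *cqe)
{
	cqe->owner = OWNER_HW;
	cq->cons_index++;
}
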
/linux-5.10/drivers/infiniband/hw/mlx4/cq.c
   81  struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);    in get_sw_cqe() local
   82  struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);    in get_sw_cqe()
   85  !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;    in get_sw_cqe()
  133  static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)    in mlx4_ib_free_cq_buf() argument
  135  mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);    in mlx4_ib_free_cq_buf()
  140  struct ib_umem **umem, u64 buf_addr, int cqe)    in mlx4_ib_get_cq_umem() argument
  147  *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,    in mlx4_ib_get_cq_umem()
  178  int entries = attr->cqe;    in mlx4_ib_create_cq()
  195  cq->ibcq.cqe = entries - 1;    in mlx4_ib_create_cq()
  281  mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);    in mlx4_ib_create_cq()
  [all …]

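Unlike mthca, mlx4 (and mlx5 below) never write the entry back: get_sw_cqe() compares the CQE's ownership bit against the parity of the consumer counter, because hardware flips the bit on every pass around the ring. A hedged sketch of just that test, with hypothetical names:

/* Illustrative only, not the mlx4 code. ring_size must be a power of two;
 * n is the free-running consumer counter.
 */
#include <stdbool.h>
#include <stdint.h>

#define OWNER_MASK 0x1

struct parity_cqe {
	uint8_t owner;              /* low bit toggled by "hardware" each pass */
};

static bool cqe_is_ours(const struct parity_cqe *cqe, uint32_t n, uint32_t ring_size)
{
	uint32_t sw_parity = !!(n & ring_size);  /* which pass software is on */

	return (cqe->owner & OWNER_MASK) == sw_parity;
}

The benefit of the parity scheme is that software never has to dirty the completion ring: advancing its own counter is enough to "return" entries to hardware.
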
/linux-5.10/drivers/infiniband/sw/rxe/rxe_cq.c
   12  int cqe, int comp_vector)    in rxe_cq_chk_attr() argument
   16  if (cqe <= 0) {    in rxe_cq_chk_attr()
   17  pr_warn("cqe(%d) <= 0\n", cqe);    in rxe_cq_chk_attr()
   21  if (cqe > rxe->attr.max_cqe) {    in rxe_cq_chk_attr()
   22  pr_warn("cqe(%d) > max_cqe(%d)\n",    in rxe_cq_chk_attr()
   23  cqe, rxe->attr.max_cqe);    in rxe_cq_chk_attr()
   29  if (cqe < count) {    in rxe_cq_chk_attr()
   30  pr_warn("cqe(%d) < current # elements in queue (%d)",    in rxe_cq_chk_attr()
   31  cqe, count);    in rxe_cq_chk_attr()
   57  int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,    in rxe_cq_from_init() argument
  [all …]

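rxe_cq_chk_attr() rejects CQ sizes outside the device limits; a userspace verbs client can apply the same bounds check before asking the kernel for the CQ. Illustrative libibverbs usage (assumes a working ibv_context), not part of the rxe driver:

/* Sketch: validate the requested depth against max_cqe, then create the CQ. */
#include <stdio.h>
#include <infiniband/verbs.h>

static struct ibv_cq *create_cq_checked(struct ibv_context *ctx, int requested)
{
	struct ibv_device_attr attr;

	if (requested <= 0) {
		fprintf(stderr, "cqe(%d) <= 0\n", requested);
		return NULL;
	}
	if (ibv_query_device(ctx, &attr))
		return NULL;
	if (requested > attr.max_cqe) {
		fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", requested, attr.max_cqe);
		return NULL;
	}
	/* no completion channel, completion vector 0 */
	return ibv_create_cq(ctx, requested, NULL, NULL, 0);
}
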
/linux-5.10/drivers/infiniband/sw/siw/siw_cq.c
   44  * Reap one CQE from the CQ. Only used by kernel clients
   46  * flush for user mapped CQE array as well.
   50  struct siw_cqe *cqe;    in siw_reap_cqe() local
   55  cqe = &cq->queue[cq->cq_get % cq->num_cqe];    in siw_reap_cqe()
   56  if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {    in siw_reap_cqe()
   58  wc->wr_id = cqe->id;    in siw_reap_cqe()
   59  wc->status = map_cqe_status[cqe->status].ib;    in siw_reap_cqe()
   60  wc->opcode = map_wc_opcode[cqe->opcode];    in siw_reap_cqe()
   61  wc->byte_len = cqe->bytes;    in siw_reap_cqe()
   64  * During CQ flush, also user land CQE's may get    in siw_reap_cqe()
  [all …]

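siw_reap_cqe() is the kernel side of polling: it copies one valid CQE into a struct ib_wc. The userspace analogue, sketched here with plain libibverbs calls, drains completions with ibv_poll_cq():

/* Sketch (assumes libibverbs): poll one work completion at a time until the
 * CQ is empty, reporting failures the same way the wc fields above are used.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int n, total = 0;

	while ((n = ibv_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IBV_WC_SUCCESS)
			fprintf(stderr, "wr_id %llu failed: %s\n",
				(unsigned long long)wc.wr_id,
				ibv_wc_status_str(wc.status));
		else
			printf("wr_id %llu done, %u bytes\n",
			       (unsigned long long)wc.wr_id, wc.byte_len);
		total++;
	}
	return n < 0 ? n : total;   /* negative on provider error */
}
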
/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
   63  static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
   64  static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  114  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);    in mlx5e_cqes_update_owner() local
  116  cqe->op_own = op_own;    in mlx5e_cqes_update_owner()
  122  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);    in mlx5e_cqes_update_owner() local
  124  cqe->op_own = op_own;    in mlx5e_cqes_update_owner()
  637  struct mlx5_cqe64 *cqe;    in mlx5e_poll_ico_cq() local
  644  cqe = mlx5_cqwq_get_cqe(&cq->wq);    in mlx5e_poll_ico_cq()
  645  if (likely(!cqe))    in mlx5e_poll_ico_cq()
  660  wqe_counter = be16_to_cpu(cqe->wqe_counter);    in mlx5e_poll_ico_cq()
  [all …]

/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/wq.h
  202  struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);    in mlx5_cqwq_get_wqe() local
  205  cqe += wq->fbc.log_stride == 7;    in mlx5_cqwq_get_wqe()
  207  return cqe;    in mlx5_cqwq_get_wqe()
  233  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);    in mlx5_cqwq_get_cqe() local
  234  u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;    in mlx5_cqwq_get_cqe()
  240  /* ensure cqe content is read after cqe ownership bit */    in mlx5_cqwq_get_cqe()
  243  return cqe;    in mlx5_cqwq_get_cqe()

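The comment at line 240 marks the ordering rule behind every ownership-bit protocol: the rest of the CQE may be read only after the ownership/valid bit has been observed (the mlx5 code uses dma_rmb() there). A self-contained userspace sketch of the same rule using C11 acquire semantics; all names below are hypothetical:

/* Consumer side of a single-producer completion slot. The producer is assumed
 * to fill payload first and then store valid=1 with release ordering.
 */
#include <stdatomic.h>
#include <stdint.h>

struct demo_cqe {
	uint32_t     payload;
	atomic_uchar valid;     /* producer stores this last, with release */
};

/* Returns nonzero and copies the payload once the entry is really complete. */
static int read_completed(struct demo_cqe *cqe, uint32_t *out)
{
	if (!atomic_load_explicit(&cqe->valid, memory_order_acquire))
		return 0;       /* not ready: do not touch the payload yet */

	*out = cqe->payload;    /* safe: acquire pairs with the producer's release */
	return 1;
}
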
/linux-5.10/drivers/infiniband/hw/cxgb4/cq.c
  186  struct t4_cqe cqe;    in insert_recv_cqe() local
  190  memset(&cqe, 0, sizeof(cqe));    in insert_recv_cqe()
  191  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |    in insert_recv_cqe()
  196  cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));    in insert_recv_cqe()
  198  cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);    in insert_recv_cqe()
  199  cq->sw_queue[cq->sw_pidx] = cqe;    in insert_recv_cqe()
  220  struct t4_cqe cqe;    in insert_sq_cqe() local
  224  memset(&cqe, 0, sizeof(cqe));    in insert_sq_cqe()
  225  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |    in insert_sq_cqe()
  230  CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;    in insert_sq_cqe()
  [all …]

/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
   83  cq->ibcq.cqe, &head);    in pvrdma_req_notify_cq()
  105  int entries = attr->cqe;    in pvrdma_create_cq()
  129  cq->ibcq.cqe = entries;    in pvrdma_create_cq()
  183  cmd->cqe = entries;    in pvrdma_create_cq()
  192  cq->ibcq.cqe = resp->cqe;    in pvrdma_create_cq()
  285  cq->ibcq.cqe, &head);    in _pvrdma_flush_cqe()
  290  cq->ibcq.cqe);    in _pvrdma_flush_cqe()
  291  struct pvrdma_cqe *cqe;    in _pvrdma_flush_cqe() local
  295  (cq->ibcq.cqe - head + tail);    in _pvrdma_flush_cqe()
  299  curr = cq->ibcq.cqe - 1;    in _pvrdma_flush_cqe()
  [all …]

/linux-5.10/drivers/infiniband/hw/mlx5/cq.c
   81  void *cqe = get_cqe(cq, n & cq->ibcq.cqe);    in get_sw_cqe() local
   84  cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;    in get_sw_cqe()
   87  !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {    in get_sw_cqe()
   88  return cqe;    in get_sw_cqe()
  117  static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,    in handle_good_req() argument
  121  switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {    in handle_good_req()
  137  wc->byte_len = be32_to_cpu(cqe->byte_cnt);    in handle_good_req()
  166  static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,    in handle_responder() argument
  182  msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));    in handle_responder()
  189  wqe_ctr = be16_to_cpu(cqe->wqe_counter);    in handle_responder()
  [all …]

/linux-5.10/tools/io_uring/io_uring-cp.c
  126  struct io_uring_cqe *cqe;    in copy_file() local
  175  ret = io_uring_wait_cqe(ring, &cqe);    in copy_file()
  178  ret = io_uring_peek_cqe(ring, &cqe);    in copy_file()
  184  if (!cqe)    in copy_file()
  187  data = io_uring_cqe_get_data(cqe);    in copy_file()
  188  if (cqe->res < 0) {    in copy_file()
  189  if (cqe->res == -EAGAIN) {    in copy_file()
  191  io_uring_cqe_seen(ring, cqe);    in copy_file()
  194  fprintf(stderr, "cqe failed: %s\n",    in copy_file()
  195  strerror(-cqe->res));    in copy_file()
  [all …]

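io_uring-cp.c consumes completions through the standard liburing pattern: wait or peek for a CQE, read its user data and res, then mark it seen. A minimal standalone sketch of that loop (assumes liburing is installed), submitting a single no-op and reaping its CQE:

/* Sketch: one no-op request, one completion, using the same wait/seen calls
 * that copy_file() uses above.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int tag = 42;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);         /* grab one submission slot */
	io_uring_prep_nop(sqe);                /* no-op request, completes immediately */
	io_uring_sqe_set_data(sqe, &tag);      /* user data travels back in the CQE */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		if (cqe->res < 0)
			fprintf(stderr, "cqe failed: %d\n", cqe->res);
		else
			printf("tag %d completed\n", *(int *)io_uring_cqe_get_data(cqe));
		io_uring_cqe_seen(&ring, cqe); /* return the CQE slot to the ring */
	}

	io_uring_queue_exit(&ring);
	return 0;
}
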
/linux-5.10/drivers/infiniband/hw/bnxt_re/qplib_fp.c
  1434  struct cq_req *cqe = (struct cq_req *)hw_cqe;    in __clean_cq() local
  1436  if (qp == le64_to_cpu(cqe->qp_handle))    in __clean_cq()
  1437  cqe->qp_handle = 0;    in __clean_cq()
  1444  struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;    in __clean_cq() local
  1446  if (qp == le64_to_cpu(cqe->qp_handle))    in __clean_cq()
  1447  cqe->qp_handle = 0;    in __clean_cq()
  2125  struct bnxt_qplib_cqe *cqe;    in __flush_sq() local
  2131  cqe = *pcqe;    in __flush_sq()
  2141  memset(cqe, 0, sizeof(*cqe));    in __flush_sq()
  2142  cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;    in __flush_sq()
  [all …]

/linux-5.10/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
   81  struct nix_cqe_tx_s *cqe,    in otx2_snd_pkt_handler() argument
   84  struct nix_send_comp_s *snd_comp = &cqe->comp;    in otx2_snd_pkt_handler()
  167  struct nix_cqe_rx_s *cqe, struct sk_buff *skb)    in otx2_set_rxhash() argument
  183  hash = cqe->hdr.flow_tag;    in otx2_set_rxhash()
  188  static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,    in otx2_free_rcv_seg() argument
  191  struct nix_rx_sg_s *sg = &cqe->sg;    in otx2_free_rcv_seg()
  197  end = start + ((cqe->parse.desc_sizem1 + 1) * 16);    in otx2_free_rcv_seg()
  208  struct nix_cqe_rx_s *cqe, int qidx)    in otx2_check_rcv_errors() argument
  211  struct nix_rx_parse_s *parse = &cqe->parse;    in otx2_check_rcv_errors()
  258  if (cqe->sg.segs == 1)    in otx2_check_rcv_errors()
  [all …]

/linux-5.10/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
  114  static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
  119  return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
  121  return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
  123  return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
  127  char *cqe, u32 val) \
  132  mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
  135  mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
  138  mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
  156  MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
  167  MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
  [all …]

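pci_hw.h generates one public accessor per CQE field that dispatches on the CQE format version. A rough sketch of the same pattern written out by hand; the field offsets, masks and names here are invented for illustration and do not match the real mlxsw layouts:

/* Sketch: a version-dispatching getter that forwards to per-version decoders. */
#include <stdint.h>
#include <string.h>

enum demo_cqe_v { DEMO_CQE_V0, DEMO_CQE_V1, DEMO_CQE_V2 };

static uint32_t demo_cqe0_wqe_counter_get(const char *cqe)
{
	uint32_t w;

	memcpy(&w, cqe + 0x04, sizeof(w));      /* hypothetical v0 offset */
	return (w >> 16) & 0xffff;
}

static uint32_t demo_cqe12_wqe_counter_get(const char *cqe)
{
	uint32_t w;

	memcpy(&w, cqe + 0x08, sizeof(w));      /* hypothetical v1/v2 offset */
	return (w >> 16) & 0xffff;
}

static uint32_t demo_cqe_wqe_counter_get(enum demo_cqe_v v, const char *cqe)
{
	switch (v) {
	case DEMO_CQE_V0:
		return demo_cqe0_wqe_counter_get(cqe);
	case DEMO_CQE_V1:
	case DEMO_CQE_V2:
	default:
		return demo_cqe12_wqe_counter_get(cqe);
	}
}
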
/linux-5.10/drivers/scsi/qedi/qedi_fw.c
   31  union iscsi_cqe *cqe,    in qedi_process_logout_resp() argument
   42  cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;    in qedi_process_logout_resp()
   50  resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);    in qedi_process_logout_resp()
   83  union iscsi_cqe *cqe,    in qedi_process_text_resp() argument
   98  cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;    in qedi_process_text_resp()
  110  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,    in qedi_process_text_resp()
  185  union iscsi_cqe *cqe,    in qedi_process_tmf_resp() argument
  197  cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;    in qedi_process_tmf_resp()
  221  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,    in qedi_process_tmf_resp()
  258  union iscsi_cqe *cqe,    in qedi_process_login_resp() argument
  [all …]

/linux-5.10/drivers/net/ethernet/qlogic/qede/qede_fp.c
  656  struct eth_fast_path_rx_tpa_start_cqe *cqe)    in qede_set_gro_params() argument
  658  u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);    in qede_set_gro_params()
  666  skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -    in qede_set_gro_params()
  667  cqe->header_len;    in qede_set_gro_params()
  836  struct eth_fast_path_rx_tpa_start_cqe *cqe)    in qede_tpa_start() argument
  838  struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];    in qede_tpa_start()
  843  pad = cqe->placement_offset + rxq->rx_headroom;    in qede_tpa_start()
  846  le16_to_cpu(cqe->len_on_first_bd),    in qede_tpa_start()
  867  if ((le16_to_cpu(cqe->pars_flags.flags) >>    in qede_tpa_start()
  870  tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);    in qede_tpa_start()
  [all …]

/linux-5.10/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
   967  int entries = attr->cqe;    in ocrdma_create_cq()
  1021  ibcq->cqe = new_cnt;    in ocrdma_resize_cq()
  1032  struct ocrdma_cqe *cqe = NULL;    in ocrdma_flush_cq() local
  1034  cqe = cq->va;    in ocrdma_flush_cq()
  1042  if (is_cqe_valid(cq, cqe))    in ocrdma_flush_cq()
  1044  cqe++;    in ocrdma_flush_cq()
  1399  /* syncronize with wqe, rqe posting and cqe processing contexts */    in ocrdma_modify_qp()
  1585  /* discard the cqe for a given QP */
  1592  struct ocrdma_cqe *cqe;    in ocrdma_discard_cqes() local
  1598  * find the matching CQE for a given qp,    in ocrdma_discard_cqes()
  [all …]

/linux-5.10/drivers/infiniband/sw/rdmavt/cq.c
   96  if (head >= (unsigned)cq->ibcq.cqe) {    in rvt_cq_enter()
   97  head = cq->ibcq.cqe;    in rvt_cq_enter()
  209  unsigned int entries = attr->cqe;    in rvt_create_cq()
  279  * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.    in rvt_create_cq()
  291  cq->ibcq.cqe = entries;    in rvt_create_cq()
  380  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)    in rvt_resize_cq() argument
  392  if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)    in rvt_resize_cq()
  399  sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);    in rvt_resize_cq()
  405  sz = sizeof(struct ib_wc) * (cqe + 1);    in rvt_resize_cq()
  435  if (head > (u32)cq->ibcq.cqe)    in rvt_resize_cq()
  [all …]

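rvt_resize_cq() backs the resize verb; from userspace the same operation is a single ibv_resize_cq() call. A hedged usage sketch (assumes libibverbs), clamping the request to the device limit first:

/* Sketch: grow a CQ, respecting max_cqe. Providers may round the size up;
 * after success cq->cqe reflects the actual depth.
 */
#include <infiniband/verbs.h>

static int grow_cq(struct ibv_context *ctx, struct ibv_cq *cq, int wanted)
{
	struct ibv_device_attr attr;

	if (ibv_query_device(ctx, &attr))
		return -1;
	if (wanted > attr.max_cqe)
		wanted = attr.max_cqe;

	return ibv_resize_cq(cq, wanted);   /* 0 on success */
}
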
/linux-5.10/drivers/scsi/bnx2i/bnx2i_hwi.c
  1125  /* Invalidate all EQ CQE index, req only for 57710 */    in bnx2i_alloc_qp_resc()
  1239  * initialization. Firmware completes this handshake with a CQE carrying
  1332  * @cqe: pointer to newly DMA'ed CQE entry for processing
  1334  * process SCSI CMD Response CQE & complete the request to SCSI-ML
  1338  struct cqe *cqe)    in bnx2i_process_scsi_cmd_resp() argument
  1348  resp_cqe = (struct bnx2i_cmd_response *)cqe;    in bnx2i_process_scsi_cmd_resp()
  1383  resp_cqe = (struct bnx2i_cmd_response *)cqe;    in bnx2i_process_scsi_cmd_resp()
  1429  * @cqe: pointer to newly DMA'ed CQE entry for processing
  1431  * process Login Response CQE & complete it to open-iscsi user daemon
  1435  struct cqe *cqe)    in bnx2i_process_login_resp() argument
  [all …]

/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
   58  struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)    in mlx5e_tls_handle_rx_skb() argument
   60  if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */    in mlx5e_tls_handle_rx_skb()
   61  return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);    in mlx5e_tls_handle_rx_skb()
   70  mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }    in mlx5e_accel_is_tls() argument
   73  struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}    in mlx5e_tls_handle_rx_skb() argument

/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
   74  struct mlx5_cqe64 *cqe);
   80  static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)    in mlx5_ipsec_is_rx_flow() argument
   82  return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));    in mlx5_ipsec_is_rx_flow()
   96  struct mlx5_cqe64 *cqe)    in mlx5e_ipsec_offload_handle_rx_skb() argument
   99  static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }    in mlx5_ipsec_is_rx_flow() argument

/linux-5.10/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
  324  cqe_size = wq->q_depth * sizeof(*rq->cqe);    in alloc_rq_cqe()
  325  rq->cqe = vzalloc(cqe_size);    in alloc_rq_cqe()
  326  if (!rq->cqe)    in alloc_rq_cqe()
  335  rq->cqe[i] = dma_alloc_coherent(&pdev->dev,    in alloc_rq_cqe()
  336  sizeof(*rq->cqe[i]),    in alloc_rq_cqe()
  338  if (!rq->cqe[i])    in alloc_rq_cqe()
  346  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],    in alloc_rq_cqe()
  352  vfree(rq->cqe);    in alloc_rq_cqe()
  368  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],    in free_rq_cqe()
  372  vfree(rq->cqe);    in free_rq_cqe()
  [all …]

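alloc_rq_cqe() builds a vzalloc'ed pointer array and gives every receive CQE its own coherent DMA buffer, unwinding partial allocations on failure. A kernel-style sketch of that allocate-then-unwind shape; the structure and field names here are hypothetical, not the hinic ones:

/* Sketch: per-entry coherent buffers with error unwinding. */
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/overflow.h>

struct demo_cqe {
	__le32 status;
	__le32 len;
};

struct demo_rq {
	struct demo_cqe **cqe;      /* array of per-entry CQE buffers */
	dma_addr_t       *cqe_dma;  /* matching bus addresses */
	u16               depth;
};

static int demo_alloc_rq_cqe(struct device *dev, struct demo_rq *rq)
{
	int i, j;

	rq->cqe = vzalloc(array_size(rq->depth, sizeof(*rq->cqe)));
	if (!rq->cqe)
		return -ENOMEM;

	rq->cqe_dma = vzalloc(array_size(rq->depth, sizeof(*rq->cqe_dma)));
	if (!rq->cqe_dma)
		goto err_free_cqe;

	for (i = 0; i < rq->depth; i++) {
		rq->cqe[i] = dma_alloc_coherent(dev, sizeof(*rq->cqe[i]),
						&rq->cqe_dma[i], GFP_KERNEL);
		if (!rq->cqe[i])
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (j = 0; j < i; j++)
		dma_free_coherent(dev, sizeof(*rq->cqe[j]), rq->cqe[j],
				  rq->cqe_dma[j]);
	vfree(rq->cqe_dma);
err_free_cqe:
	vfree(rq->cqe);
	return -ENOMEM;
}
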
/linux-5.10/include/linux/mlx5/device.h
  845  static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)    in mlx5_get_cqe_format() argument
  847  return (cqe->op_own >> 2) & 0x3;    in mlx5_get_cqe_format()
  850  static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)    in get_cqe_opcode() argument
  852  return cqe->op_own >> 4;    in get_cqe_opcode()
  855  static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)    in get_cqe_lro_tcppsh() argument
  857  return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;    in get_cqe_lro_tcppsh()
  860  static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)    in get_cqe_l4_hdr_type() argument
  862  return (cqe->l4_l3_hdr_type >> 4) & 0x7;    in get_cqe_l4_hdr_type()
  865  static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)    in get_cqe_l3_hdr_type() argument
  867  return (cqe->l4_l3_hdr_type >> 2) & 0x3;    in get_cqe_l3_hdr_type()
  [all …]

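These helpers decode subfields of op_own and the header-type byte. A short usage sketch of how a caller might branch on them; only the switch structure and demo_cqe_kind() are illustrative, the helper and opcode names come from this header:

/* Sketch: classify a CQE by the opcode packed into op_own. */
#include <linux/mlx5/device.h>

static const char *demo_cqe_kind(struct mlx5_cqe64 *cqe)
{
	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_REQ:
		return "send completion";
	case MLX5_CQE_RESP_SEND:
		return "receive completion";
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		return "error completion";
	default:
		return "other";
	}
}
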
/linux-5.10/drivers/infiniband/ulp/iser/iscsi_iser.h
  235  * @cqe: completion handler
  248  struct ib_cqe cqe;    member
  266  * @cqe: completion handler
  275  struct ib_cqe cqe;    member
  287  * @cqe: completion handler
  295  struct ib_cqe cqe;    member
  578  iser_rx(struct ib_cqe *cqe)    in iser_rx() argument
  580  return container_of(cqe, struct iser_rx_desc, cqe);    in iser_rx()
  584  iser_tx(struct ib_cqe *cqe)    in iser_tx() argument
  586  return container_of(cqe, struct iser_tx_desc, cqe);    in iser_tx()
  [all …]

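iser_rx()/iser_tx() recover their descriptors from the embedded struct ib_cqe with container_of(); the completion handler receives the same pointer back through wc->wr_cqe. A sketch of that embedding pattern with hypothetical demo_* names:

/* Sketch: descriptor embeds an ib_cqe, the handler recovers the descriptor. */
#include <rdma/ib_verbs.h>

struct demo_desc {
	void         *buf;
	struct ib_cqe cqe;      /* embedded completion handle */
};

static void demo_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct demo_desc *desc = container_of(wc->wr_cqe, struct demo_desc, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("completion failed: %d\n", wc->status);
	else
		pr_debug("desc %p completed, %u bytes\n", desc, wc->byte_len);
}

/* When posting, the descriptor advertises its handler:
 *	desc->cqe.done = demo_done;
 *	recv_wr.wr_cqe = &desc->cqe;
 */
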
/linux-5.10/drivers/net/ethernet/mellanox/mlx4/en_rx.c
  623  * the (IPv4 | IPv6) bits are set in cqe->status.
  625  static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,    in check_csum() argument
  631  /* CQE csum doesn't cover padding octets in short ethernet    in check_csum()
  643  hw_checksum = csum_unfold((__force __sum16)cqe->checksum);    in check_csum()
  645  if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&    in check_csum()
  652  if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))    in check_csum()
  672  struct mlx4_cqe *cqe;    in mlx4_en_process_rx_cq() local
  690  * descriptor offset can be deduced from the CQE index instead of    in mlx4_en_process_rx_cq()
  691  * reading 'cqe->index' */    in mlx4_en_process_rx_cq()
  693  cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;    in mlx4_en_process_rx_cq()
  [all …]

/linux-5.10/drivers/net/ethernet/ibm/ehea/ehea_main.c
  528  static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)    in ehea_check_cqe() argument
  530  *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;    in ehea_check_cqe()
  531  if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)    in ehea_check_cqe()
  533  if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&    in ehea_check_cqe()
  534  (cqe->header_length == 0))    in ehea_check_cqe()
  540  struct sk_buff *skb, struct ehea_cqe *cqe,    in ehea_fill_skb() argument
  543  int length = cqe->num_bytes_transfered - 4; /*remove CRC */    in ehea_fill_skb()
  550  if (cqe->status & EHEA_CQE_BLIND_CKSUM) {    in ehea_fill_skb()
  552  skb->csum = csum_unfold(~cqe->inet_checksum_value);    in ehea_fill_skb()
  561  struct ehea_cqe *cqe)    in get_skb_by_index() argument
  [all …]