/linux/lib/crypto/
gf128mul.c

    #define gf128mul_dat(q) { \
        q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
        q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
        q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
        q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
        q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
        q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
        q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
        q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
        q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
    [all …]
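gf128mul_dat() is an "x-macro": it applies a caller-supplied per-entry expression q to every byte value, so a single macro definition stamps out an entire 256-entry lookup table at compile time. A minimal sketch of the idiom, shortened to four entries (DAT, xx, and demo_tab are illustrative names, not kernel code):

    #include <stdio.h>

    /* Apply a per-index expression q() to each table slot. */
    #define DAT(q) { q(0x00), q(0x01), q(0x02), q(0x03) }
    #define xx(i) ((i) ^ 0xff)              /* example per-byte transform */

    static const unsigned char demo_tab[] = DAT(xx);  /* 0xff 0xfe 0xfd 0xfc */

    int main(void)
    {
        for (unsigned int i = 0; i < sizeof(demo_tab); i++)
            printf("%02x ", demo_tab[i]);
        return 0;
    }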
|
/linux/Documentation/networking/
tls-offload-layers.svg

    (matches are inside raw SVG path data — the "q" hits are quadratic-Bézier
    drawing commands in the figure, not code)
|
/linux/drivers/net/ethernet/fungible/funeth/
funeth_rx.c

    static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
        struct funeth_rx_cache *c = &q->cache;
        ...
        dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
    ...
    static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
        struct funeth_rx_cache *c = &q->cache;
        ...
        dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
        ...
        dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
    ...
    static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
        ...
        if (cache_get(q, rb))
        ...
        rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
    [all …]
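cache_offer() and cache_get() form a small recycle cache: RX pages whose DMA mappings are still valid are parked and reused before falling back to the page allocator. A heavily simplified, hedged sketch of the recycling idea in plain C (no DMA, a fixed-size stack instead of the driver's ring; all names illustrative):

    #include <stdlib.h>

    #define CACHE_DEPTH 64

    struct page_cache {
        void *slot[CACHE_DEPTH];
        unsigned int count;
    };

    /* Park a buffer for reuse; returns 0 when full so the caller frees it. */
    static int cache_offer(struct page_cache *c, void *page)
    {
        if (c->count == CACHE_DEPTH)
            return 0;
        c->slot[c->count++] = page;
        return 1;
    }

    /* Prefer a recycled buffer over a fresh allocation. */
    static void *cache_get(struct page_cache *c)
    {
        return c->count ? c->slot[--c->count] : NULL;
    }

    static void *alloc_page_cached(struct page_cache *c)
    {
        void *p = cache_get(c);

        return p ? p : malloc(4096);    /* fall back to the allocator */
    }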
|
funeth_tx.c

    static void *txq_end(const struct funeth_txq *q)
        return (void *)q->hw_wb;
    ...
    static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
        return txq_end(q) - p;
    ...
    static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
        ...
        i < ngle && txq_to_end(q, gle); i++, gle++)
        ...
        if (txq_to_end(q, gle) == 0) {
            gle = (struct fun_dataop_gl *)q->desc;
    ...
    static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
        ...
        FUN_QSTAT_INC(q, tx_tls_fallback);
    [all …]
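txq_to_end() reports how many bytes remain before the end of the descriptor area, and fun_write_gl() resets its write pointer to q->desc when that count hits zero. A hedged sketch of the same wrap-to-base step for a circular byte buffer (illustrative names; assumes the caller has already reserved len free bytes):

    #include <string.h>

    struct byte_ring {
        unsigned char *base;    /* start of the ring */
        unsigned char *end;     /* one past the last byte */
        unsigned char *wp;      /* current write pointer */
    };

    static unsigned int ring_to_end(const struct byte_ring *r)
    {
        return (unsigned int)(r->end - r->wp);
    }

    /* Copy len bytes, wrapping the write pointer back to base at the boundary. */
    static void ring_write(struct byte_ring *r, const void *data, unsigned int len)
    {
        unsigned int tail = ring_to_end(r);

        if (len < tail) {
            memcpy(r->wp, data, len);
            r->wp += len;
        } else {                        /* split the copy across the wrap */
            memcpy(r->wp, data, tail);
            memcpy(r->base, (const unsigned char *)data + tail, len - tail);
            r->wp = r->base + (len - tail);
        }
    }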
|
/linux/drivers/gpu/drm/xe/
xe_guc_submit.c

    exec_queue_to_guc(struct xe_exec_queue *q)
        return &q->gt->uc.guc;
    ...
    static bool exec_queue_registered(struct xe_exec_queue *q)
        return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;

    static void set_exec_queue_registered(struct xe_exec_queue *q)
        atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);

    static void clear_exec_queue_registered(struct xe_exec_queue *q)
        atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);

    static bool exec_queue_enabled(struct xe_exec_queue *q)
        return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
    [all …]
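The queue state here is a single atomic word used as a bitmask: atomic_or() sets a flag, atomic_and() with the complemented mask clears it, and atomic_read() tests it. The equivalent pattern in portable C11 atomics (a sketch; the flag names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define STATE_REGISTERED (1u << 0)
    #define STATE_ENABLED    (1u << 1)

    static atomic_uint state;

    static bool queue_registered(void)
    {
        return atomic_load(&state) & STATE_REGISTERED;
    }

    static void set_queue_registered(void)
    {
        atomic_fetch_or(&state, STATE_REGISTERED);
    }

    static void clear_queue_registered(void)
    {
        atomic_fetch_and(&state, ~STATE_REGISTERED);
    }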
|
xe_exec_queue.c

    static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
    ...
    static void __xe_exec_queue_free(struct xe_exec_queue *q)
        ...
        if (q->tlb_inval[i].dep_scheduler)
            xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);

        if (xe_exec_queue_uses_pxp(q))
            xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
        if (q->vm)
            xe_vm_put(q->vm);
        ...
        if (q->xef)
            xe_file_put(q->xef);
    [all …]
|
/linux/sound/core/seq/oss/
seq_oss_readq.c

    struct seq_oss_readq *q;
    ...
    q = kzalloc(sizeof(*q), GFP_KERNEL);
    if (!q)
    ...
    q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL);
    if (!q->q) {
        kfree(q);
    ...
    q->maxlen = maxlen;
    q->qlen = 0;
    q->head = q->tail = 0;
    init_waitqueue_head(&q->midi_sleep);
    [all …]
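snd_seq_oss_readq_new() is the classic two-step constructor: allocate the control structure, then the ring storage, and unwind the first allocation if the second fails. The same shape in userspace C (hedged sketch with an illustrative payload type):

    #include <stdlib.h>

    struct readq {
        union { long ev[2]; } *ring;    /* event storage, illustrative payload */
        int maxlen, qlen, head, tail;
    };

    static struct readq *readq_new(int maxlen)
    {
        struct readq *q = calloc(1, sizeof(*q));

        if (!q)
            return NULL;

        q->ring = calloc(maxlen, sizeof(*q->ring));
        if (!q->ring) {
            free(q);                    /* unwind the first allocation */
            return NULL;
        }

        q->maxlen = maxlen;
        q->head = q->tail = q->qlen = 0;
        return q;
    }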
|
seq_oss_writeq.c

    struct seq_oss_writeq *q;
    ...
    q = kzalloc(sizeof(*q), GFP_KERNEL);
    if (!q)
    ...
    q->dp = dp;
    q->maxlen = maxlen;
    spin_lock_init(&q->sync_lock);
    q->sync_event_put = 0;
    q->sync_time = 0;
    init_waitqueue_head(&q->sync_sleep);
    ...
    return q;
    [all …]
|
/linux/sound/core/seq/
seq_queue.c

    static int queue_list_add(struct snd_seq_queue *q)
        ...
        queue_list[i] = q;
        q->queue = i;
    ...
    struct snd_seq_queue *q;
    ...
    q = queue_list[id];
    if (q) {
        guard(spinlock)(&q->owner_lock);
        if (q->owner == client) {
            ...
            q->klocked = 1;
        ...
        return q;
    [all …]
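guard(spinlock)(&q->owner_lock) comes from the kernel's <linux/cleanup.h>: the lock is dropped automatically when the enclosing scope ends. Outside the kernel, the same scope-bound unlock can be built on the GCC/Clang cleanup attribute (a hedged sketch, not the kernel macro):

    #include <pthread.h>

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Lock now; unlock automatically when the variable goes out of scope. */
    #define guard_mutex(m) \
        pthread_mutex_t *scoped_lock_ __attribute__((cleanup(unlock_cleanup))) = \
            (pthread_mutex_lock(m), (m))

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int shared;

    static int read_shared(void)
    {
        guard_mutex(&lock);             /* dropped on every return path */
        return shared;
    }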
|
/linux/drivers/media/common/videobuf2/
videobuf2-core.c

    #define dprintk(q, level, fmt, arg...)                          \
        ...
            pr_info("[%s] %s: " fmt, (q)->name, __func__,           \
    ...
    #define log_qop(q, op)                                          \
        dprintk(q, 2, "call_qop(%s)%s\n", #op,                      \
            (q)->ops->op ? "" : " (nop)")

    #define call_qop(q, op, args...)                                \
        ...
        log_qop(q, op);                                             \
        err = (q)->ops->op ? (q)->ops->op(args) : 0;                \
        ...
        (q)->cnt_ ## op++;                                          \
    ...
    #define call_void_qop(q, op, args...)                           \
    [all …]
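call_qop() treats a NULL driver hook as a successful no-op and bumps a per-op success counter (cnt_ ## op, built by token pasting) when the call returns zero. A hedged miniature using the same GNU statement-expression extension as the original (the ops layout and names are illustrative):

    #include <stdio.h>

    struct ops {
        int (*start)(int arg);
        int (*stop)(int arg);
    };

    struct queue {
        const struct ops *ops;
        unsigned int cnt_start, cnt_stop;   /* one counter per op */
    };

    /* NULL hook == successful no-op; token pasting picks the counter. */
    #define call_qop(q, op, ...) ({                                     \
        int err_ = (q)->ops->op ? (q)->ops->op(__VA_ARGS__) : 0;        \
        if (!err_)                                                      \
            (q)->cnt_ ## op++;                                          \
        err_;                                                           \
    })

    static int do_start(int arg) { return arg < 0 ? -1 : 0; }
    static const struct ops my_ops = { .start = do_start }; /* .stop is NULL */

    int main(void)
    {
        struct queue q = { .ops = &my_ops };

        printf("%d %d\n", call_qop(&q, start, 1), call_qop(&q, stop, 1));
        printf("starts=%u stops=%u\n", q.cnt_start, q.cnt_stop);
        return 0;
    }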
|
/linux/net/sched/
sch_choke.c

    static unsigned int choke_len(const struct choke_sched_data *q)
        return (q->tail - q->head) & q->tab_mask;

    static int use_ecn(const struct choke_sched_data *q)
        return q->flags & TC_RED_ECN;

    static int use_harddrop(const struct choke_sched_data *q)
        return q->flags & TC_RED_HARDDROP;

    static void choke_zap_head_holes(struct choke_sched_data *q)
        ...
        q->head = (q->head + 1) & q->tab_mask;
        if (q->head == q->tail)
        ...
        } while (q->tab[q->head] == NULL);
    [all …]
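CHOKe keeps packets in a power-of-two table where random early drops can punch holes anywhere, so choke_zap_head_holes() walks the head index past NULL slots after a dequeue. A hedged, self-contained sketch of that walk:

    #include <stddef.h>

    #define TAB_SIZE 8                      /* must be a power of two */

    struct choke_ring {
        void *tab[TAB_SIZE];
        unsigned int head, tail;            /* free-running, masked on use */
        unsigned int mask;                  /* TAB_SIZE - 1 */
    };

    static unsigned int ring_len(const struct choke_ring *q)
    {
        return (q->tail - q->head) & q->mask;
    }

    /* Advance head past slots emptied by random early drops. */
    static void zap_head_holes(struct choke_ring *q)
    {
        do {
            q->head = (q->head + 1) & q->mask;
            if (q->head == q->tail)
                break;                      /* ring drained */
        } while (q->tab[q->head] == NULL);
    }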
|
sch_netem.c

    static bool loss_4state(struct netem_sched_data *q)
        struct clgstate *clg = &q->clg;
        u32 rnd = prandom_u32_state(&q->prng.prng_state);
    ...
    static bool loss_gilb_ell(struct netem_sched_data *q)
        struct clgstate *clg = &q->clg;
        struct rnd_state *s = &q->prng.prng_state;
    ...
    static bool loss_event(struct netem_sched_data *q)
        switch (q->loss_model) {
        ...
            return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
        ...
            return loss_4state(q);
    [all …]
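loss_gilb_ell() implements the two-state Gilbert-Elliott channel: a "good" and a "bad" state with separate transition and loss probabilities, which reproduces bursty loss that a single drop probability cannot. A hedged standalone sketch with probabilities in 32-bit fixed point (the rnd32 helper is a crude stand-in for the kernel PRNG; field names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    enum { GOOD_STATE, BAD_STATE };

    struct gilb_ell {
        int state;
        uint32_t p13;   /* P(good -> bad),  scaled so UINT32_MAX == 1.0 */
        uint32_t p31;   /* P(bad  -> good) */
        uint32_t p32;   /* P(loss | bad)   */
        uint32_t p14;   /* P(loss | good), 0 in the classic Gilbert model */
    };

    /* Crude 32-bit random helper for the sketch. */
    static uint32_t rnd32(void)
    {
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    static bool loss_gilb_ell(struct gilb_ell *g)
    {
        switch (g->state) {
        case GOOD_STATE:
            if (rnd32() < g->p13)
                g->state = BAD_STATE;
            return rnd32() < g->p14;
        case BAD_STATE:
            if (rnd32() < g->p31)
                g->state = GOOD_STATE;
            return rnd32() < g->p32;
        }
        return false;
    }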
|
sch_sfq.c

    static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
        ...
        return &q->slots[val].dep;
        return &q->dep[val - SFQ_MAX_FLOWS];

    static unsigned int sfq_hash(const struct sfq_sched_data *q,
        ...
        return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
    ...
    struct sfq_sched_data *q = qdisc_priv(sch);
    ...
    TC_H_MIN(skb->priority) <= q->divisor)
    ...
    fl = rcu_dereference_bh(q->filter_list);
    ...
    return sfq_hash(q, skb) + 1;
    ...
    if (TC_H_MIN(res.classid) <= q->divisor)
    [all …]
|
sch_dualpi2.c

    static u64 head_enqueue_time(struct Qdisc *q)
        struct sk_buff *skb = qdisc_peek_head(q);
    ...
    static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
        return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
    ...
    static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
        return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;

    static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
        ...
        q->ecn_mark++;
    ...
    static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
        q->c_protection_credit = q->c_protection_init;
    [all …]
|
sch_sfb.c

    static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
        ...
        struct sfb_bucket *b = &q->bins[slot].bins[0][0];
    ...
    static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
        ...
        increment_one_qlen(sfbhash, 0, q);
        ...
        increment_one_qlen(sfbhash, 1, q);

    static void decrement_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
        ...
        struct sfb_bucket *b = &q->bins[slot].bins[0][0];
    ...
    static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
        ...
        decrement_one_qlen(sfbhash, 0, q);
        ...
        decrement_one_qlen(sfbhash, 1, q);
    [all …]
|
sch_fq_pie.c

    static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
        ...
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
    ...
    struct fq_pie_sched_data *q = qdisc_priv(sch);
    ...
    TC_H_MIN(skb->priority) <= q->flows_cnt)
    ...
    filter = rcu_dereference_bh(q->filter_list);
    ...
    return fq_pie_hash(q, skb) + 1;
    ...
    if (TC_H_MIN(res.classid) <= q->flows_cnt)
    ...
    struct fq_pie_sched_data *q = qdisc_priv(sch);
    ...
    sel_flow = &q->flows[idx];
    ...
    memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
    [all …]
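reciprocal_scale() maps a 32-bit hash into [0, flows_cnt) without a modulo: it keeps the high 32 bits of the 64-bit product hash * n, which is cheaper than hash % n and uniform for a uniform hash. The operation itself is one line:

    #include <stdint.h>

    /* Map a uniform 32-bit value into [0, n) with a multiply + shift,
     * matching the kernel's reciprocal_scale(). */
    static inline uint32_t reciprocal_scale(uint32_t val, uint32_t n)
    {
        return (uint32_t)(((uint64_t)val * n) >> 32);
    }

    /* e.g. reciprocal_scale(hash, 1024) picks one of 1024 flow buckets. */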
|
/linux/net/xdp/
xsk_queue.h

    static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 idx = cached_cons & q->ring_mask;
    ...
    static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
        if (q->cached_cons != q->cached_prod) {
            __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
    ...
    static inline bool xskq_has_descs(struct xsk_queue *q)
        return q->cached_cons != q->cached_prod;

    static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
        ...
        q->invalid_descs++;
    [all …]
|
/linux/drivers/spi/
spi-fsl-qspi.c

    static inline int needs_swap_endian(struct fsl_qspi *q)
        return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;

    static inline int needs_4x_clock(struct fsl_qspi *q)
        return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;

    static inline int needs_fill_txfifo(struct fsl_qspi *q)
        return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;

    static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
        return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;

    static inline int needs_amba_base_offset(struct fsl_qspi *q)
        return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
    [all …]
|
/linux/block/
blk-pm.c

    void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
        q->dev = dev;
        q->rpm_status = RPM_ACTIVE;
        pm_runtime_set_autosuspend_delay(q->dev, -1);
        pm_runtime_use_autosuspend(q->dev);
    ...
    int blk_pre_runtime_suspend(struct request_queue *q)
        ...
        if (!q->dev)
        ...
        WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
        ...
        spin_lock_irq(&q->queue_lock);
        q->rpm_status = RPM_SUSPENDING;
    [all …]
|
/linux/drivers/infiniband/hw/ionic/
ionic_queue.h

    int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
    ...
    void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
    ...
    static inline bool ionic_queue_empty(struct ionic_queue *q)
        return q->prod == q->cons;
    ...
    static inline u16 ionic_queue_length(struct ionic_queue *q)
        return (q->prod - q->cons) & q->mask;
    ...
    static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
        return q->mask - ionic_queue_length(q);
    ...
    static inline bool ionic_queue_full(struct ionic_queue *q)
        return q->mask == ionic_queue_length(q);
    [all …]
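These helpers are the canonical power-of-two ring arithmetic: prod and cons run freely, & mask reduces them modulo the ring size, and one slot is sacrificed so that full (length == mask) is distinguishable from empty (prod == cons). A self-contained sketch showing that the subtraction still works across u16 wraparound:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct ring {
        uint16_t prod, cons;    /* free-running indices */
        uint16_t mask;          /* size - 1, size a power of two */
    };

    static bool ring_empty(const struct ring *q)
    {
        return q->prod == q->cons;
    }

    static uint16_t ring_length(const struct ring *q)
    {
        return (q->prod - q->cons) & q->mask;   /* correct even after wrap */
    }

    static bool ring_full(const struct ring *q)
    {
        return ring_length(q) == q->mask;
    }

    int main(void)
    {
        struct ring q = { .prod = 0xfffe, .cons = 2, .mask = 15 };

        /* 0xfffe - 2 = 0xfffc; & 15 = 12 entries in flight. */
        assert(ring_length(&q) == 12);
        assert(!ring_full(&q) && !ring_empty(&q));
        return 0;
    }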
|
/linux/drivers/infiniband/sw/rxe/
rxe_queue.h

    void rxe_queue_reset(struct rxe_queue *q);
    ...
    int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
    ...
    static inline u32 queue_next_index(struct rxe_queue *q, int index)
        return (index + 1) & q->index_mask;

    static inline u32 queue_get_producer(const struct rxe_queue *q,
        ...
        prod = smp_load_acquire(&q->buf->producer_index);
        ...
        prod = q->index;
        ...
        prod = q->buf->producer_index;
        ...
        prod = smp_load_acquire(&q->buf->producer_index);
    ...
    static inline u32 queue_get_consumer(const struct rxe_queue *q,
    [all …]
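queue_get_producer() uses smp_load_acquire() when the producer index is written by the other endpoint, ordering the index read before the read of the entry it guards. The matching single-producer/single-consumer handshake in C11 atomics (a hedged sketch; names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    #define QSIZE 16                        /* power of two */

    struct spsc {
        uint32_t buf[QSIZE];
        _Atomic uint32_t prod, cons;        /* shared between two threads */
    };

    /* Consumer side: acquire-load prod so the entry read below cannot be
     * reordered before it; release-store cons so the producer sees the
     * slot as free only after we are done with it. */
    static int spsc_pop(struct spsc *q, uint32_t *out)
    {
        uint32_t cons = atomic_load_explicit(&q->cons, memory_order_relaxed);
        uint32_t prod = atomic_load_explicit(&q->prod, memory_order_acquire);

        if (cons == prod)
            return 0;                       /* empty */
        *out = q->buf[cons & (QSIZE - 1)];
        atomic_store_explicit(&q->cons, cons + 1, memory_order_release);
        return 1;
    }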
|
/linux/drivers/net/wireless/broadcom/b43/
pio.c

    static u16 generate_cookie(struct b43_pio_txqueue *q,
        ...
        cookie = (((u16)q->index + 1) << 12);
    ...
    struct b43_pio_txqueue *q = NULL;
    ...
    q = pio->tx_queue_AC_BK;
    ...
    q = pio->tx_queue_AC_BE;
    ...
    q = pio->tx_queue_AC_VI;
    ...
    q = pio->tx_queue_AC_VO;
    ...
    q = pio->tx_queue_mcast;
    ...
    if (B43_WARN_ON(!q))
    ...
    if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
    [all …]
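generate_cookie() packs the queue index (plus one, so zero can mean "no queue") into the top nibble of a 16-bit cookie, leaving the low 12 bits for the packet slot; parse_cookie() reverses this to route a completion back to its queue. A hedged sketch of the pack/unpack pair, following the <<12 shift in the snippet:

    #include <stdint.h>

    /* cookie layout: [15:12] queue index + 1, [11:0] packet slot */
    static uint16_t make_cookie(unsigned int qidx, unsigned int slot)
    {
        return (uint16_t)(((qidx + 1) << 12) | (slot & 0x0fff));
    }

    static void parse_cookie(uint16_t cookie,
                             unsigned int *qidx, unsigned int *slot)
    {
        *qidx = (cookie >> 12) - 1; /* 0 in the top nibble means "no queue" */
        *slot = cookie & 0x0fff;
    }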
|
/linux/drivers/net/wireless/mediatek/mt76/
dma.c

    mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
        if (!mt76_queue_is_wed_rro(q))
        ...
        q->magic_cnt = 0;
        if (mt76_queue_is_wed_rro_ind(q)) {
            ...
            rro_desc = (struct mt76_wed_rro_desc *)q->desc;
            for (i = 0; i < q->ndesc; i++) {
            ...
        } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
            struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
            ...
            for (i = 0; i < q->ndesc; i++)
    ...
    mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
    [all …]
|
/linux/drivers/s390/cio/
qdio_main.c

    static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
        ...
        int tmp_count = count, tmp_start = start, nr = q->nr;
        ...
        qperf_inc(q, eqbs);

        if (!q->is_input_q)
            nr += q->irq_ptr->nr_input_qs;
        ...
        ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
        ...
        qperf_inc(q, eqbs_partial);
        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
        ...
        DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
        ...
        DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
    [all …]
|
/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c

    static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
        return container_of(q, struct sge_qset, rspq);

    static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
        return container_of(q, struct sge_qset, txq[qidx]);
    ...
    ... refill_rspq(..., const struct sge_rspq *q, unsigned int credits)
        ...
        V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
    ...
    static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
        ...
        struct tx_sw_desc *d = &q->sdesc[cidx];
        ...
        sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
        ...
        d = cidx + 1 == q->size ? q->sdesc : d + 1;
    [all …]
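rspq_to_qset() recovers the enclosing queue set from a pointer to an embedded member: container_of() is just offsetof() arithmetic. A minimal sketch of the idiom (simplified macro without the kernel's type checking):

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rspq { int credits; };

    struct qset {
        int id;
        struct rspq rspq;       /* embedded member */
    };

    static struct qset *rspq_to_qset(struct rspq *q)
    {
        return container_of(q, struct qset, rspq);
    }

    int main(void)
    {
        struct qset qs = { .id = 3 };

        assert(rspq_to_qset(&qs.rspq) == &qs);  /* recovers the container */
        return 0;
    }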
|