/linux-5.10/lib/ |
D | list_sort.c |
    133  * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
    143  * pending lists. This is beautifully simple code, but rather subtle.
    151  * 2^k, which is when we have 2^k elements pending in smaller lists,
    156  * a third list of size 2^(k+1), so there are never more than two pending.
    158  * The number of pending lists of size 2^k is determined by the
    167  * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
    168  * 1: 01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
    169  * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
    170  * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
    171  * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
    [all …]
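The excerpt is from the comment block explaining list_sort()'s bottom-up strategy: runs of size 2^k are kept pending and merged according to how many elements have been queued so far. As a point of reference only, below is a minimal user-space sketch of the textbook 1:1 variant of the same idea, where pending runs are merged exactly like carries in binary addition; it illustrates the general technique, not the kernel's 2:1 scheme or its code.

/*
 * Bottom-up merge sort over a singly linked list: pending runs of size 2^k
 * are merged like carries in binary addition.  Illustration only; this is
 * the 1:1 textbook variant, not list_sort()'s 2:1 scheme.
 */
#include <stddef.h>

struct node {
        int val;
        struct node *next;
};

/* Stable merge of two sorted lists: ties keep elements of 'a' first. */
static struct node *merge(struct node *a, struct node *b)
{
        struct node head = { 0 }, *tail = &head;

        while (a && b) {
                if (a->val <= b->val) {
                        tail->next = a;
                        a = a->next;
                } else {
                        tail->next = b;
                        b = b->next;
                }
                tail = tail->next;
        }
        tail->next = a ? a : b;
        return head.next;
}

/* Sorts lists of fewer than 2^32 elements. */
struct node *bottom_up_sort(struct node *list)
{
        struct node *pending[32] = { NULL }; /* slot k holds a run of size 2^k */
        struct node *run, *result = NULL;
        int k;

        while (list) {
                run = list;             /* detach one element as a size-1 run */
                list = list->next;
                run->next = NULL;

                /* "Add 1" to the binary counter: carry merges upward. */
                for (k = 0; pending[k]; k++) {
                        run = merge(pending[k], run);
                        pending[k] = NULL;
                }
                pending[k] = run;
        }

        /* Merge whatever is still pending, smallest runs first. */
        for (k = 0; k < 32; k++)
                if (pending[k])
                        result = result ? merge(pending[k], result) : pending[k];

        return result;
}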
|
/linux-5.10/drivers/staging/fwserial/ |
D | dma_fifo.c |
     41  INIT_LIST_HEAD(&fifo->pending);  in dma_fifo_init()
     98  struct dma_pending *pending, *next;  in dma_fifo_free() local
    103  list_for_each_entry_safe(pending, next, &fifo->pending, link)  in dma_fifo_free()
    104  list_del_init(&pending->link);  in dma_fifo_free()
    115  struct dma_pending *pending, *next;  in dma_fifo_reset() local
    120  list_for_each_entry_safe(pending, next, &fifo->pending, link)  in dma_fifo_reset()
    121  list_del_init(&pending->link);  in dma_fifo_reset()
    222  list_add_tail(&pended->link, &fifo->pending);  in dma_fifo_out_pend()
    244  struct dma_pending *pending, *next, *tmp;  in dma_fifo_out_complete() local
    250  if (list_empty(&fifo->pending) && fifo->open == 0)  in dma_fifo_out_complete()
    [all …]
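The hits show the same drain idiom twice (free and reset): walk the pending list with the _safe iterator so the current entry can be unlinked mid-walk. A self-contained sketch of that idiom, with the made-up type name sketch_pending:

/*
 * Drain idiom as in dma_fifo_free()/dma_fifo_reset(): the _safe iterator
 * caches the next entry, so unlinking the current one is harmless.
 */
#include <linux/list.h>

struct sketch_pending {
        struct list_head link;
        /* ... per-request state ... */
};

static void sketch_drain(struct list_head *pending)
{
        struct sketch_pending *p, *next;

        list_for_each_entry_safe(p, next, pending, link)
                list_del_init(&p->link);        /* safe: 'next' is already cached */
}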
|
/linux-5.10/arch/powerpc/kvm/ |
D | book3s_xive_template.c |
     37  /* Grab CPPR of the most favored pending interrupt */  in GLUE()
     40  xc->pending |= 1 << cppr;  in GLUE()
     86  * pending.  in GLUE()
    116  u8 pending, int scan_type)  in GLUE()
    121  /* Find highest pending priority */  in GLUE()
    122  while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {  in GLUE()
    128  * If pending is 0 this will return 0xff which is what  in GLUE()
    131  prio = ffs(pending) - 1;  in GLUE()
    172  * This is safe because if we have another pending MFRR  in GLUE()
    195  /* Clear the pending bit if the queue is now empty */  in GLUE()
    [all …]
|
/linux-5.10/arch/mips/vr41xx/common/ |
D | irq.c |
     80  unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;  in plat_irq_dispatch() local
     82  if (pending & CAUSEF_IP7)  in plat_irq_dispatch()
     84  else if (pending & 0x7800) {  in plat_irq_dispatch()
     85  if (pending & CAUSEF_IP3)  in plat_irq_dispatch()
     87  else if (pending & CAUSEF_IP4)  in plat_irq_dispatch()
     89  else if (pending & CAUSEF_IP5)  in plat_irq_dispatch()
     91  else if (pending & CAUSEF_IP6)  in plat_irq_dispatch()
     93  } else if (pending & CAUSEF_IP2)  in plat_irq_dispatch()
     95  else if (pending & CAUSEF_IP0)  in plat_irq_dispatch()
     97  else if (pending & CAUSEF_IP1)  in plat_irq_dispatch()
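This entry (like the cobalt, ralink, and loongson32 entries further down) is an instance of the generic MIPS dispatch pattern: Cause.IP & Status.IM gives the set of asserted, unmasked interrupt lines, and each CAUSEF_IPn bit is tested in priority order. A minimal sketch of that shape; the IRQ numbers below are placeholders, not the VR41xx ones:

#include <linux/linkage.h>
#include <asm/irq.h>
#include <asm/mipsregs.h>

#define SKETCH_TIMER_IRQ        7       /* placeholder IRQ numbers */
#define SKETCH_CASCADE_IRQ      2

asmlinkage void plat_irq_dispatch(void)
{
        /* Lines that are both asserted (Cause.IP) and unmasked (Status.IM). */
        unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

        if (pending & CAUSEF_IP7)               /* CPU timer: highest priority */
                do_IRQ(SKETCH_TIMER_IRQ);
        else if (pending & CAUSEF_IP2)          /* cascaded interrupt controller */
                do_IRQ(SKETCH_CASCADE_IRQ);
        else
                spurious_interrupt();
}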
|
/linux-5.10/drivers/gpu/drm/i915/ |
D | i915_sw_fence.c |
    145  atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */  in __i915_sw_fence_wake_up_all()
    192  if (!atomic_dec_and_test(&fence->pending))  in __i915_sw_fence_complete()
    220  int pending;  in i915_sw_fence_await() local
    226  pending = atomic_read(&fence->pending);  in i915_sw_fence_await()
    228  if (pending < 1)  in i915_sw_fence_await()
    230  } while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));  in i915_sw_fence_await()
    252  atomic_set(&fence->pending, 1);  in i915_sw_fence_reinit()
    336  int pending;  in __i915_sw_fence_await_sw_fence() local
    352  pending = 0;  in __i915_sw_fence_await_sw_fence()
    364  pending |= I915_SW_FENCE_FLAG_ALLOC;  in __i915_sw_fence_await_sw_fence()
    [all …]
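The i915_sw_fence_await() hits show a lock-free "add a waiter only while still pending" loop: the counter is re-read and only incremented while it is still at least 1, so a fence that has already signalled (0) or been torn down (-1) cannot gain new waiters. A generic sketch of that loop, not the i915 code:

#include <linux/atomic.h>
#include <linux/types.h>

/* Returns true if a reference on the pending count was taken. */
static bool sketch_await_if_pending(atomic_t *pending)
{
        int old = atomic_read(pending);

        do {
                if (old < 1)    /* already signalled (0) or torn down (-1) */
                        return false;
        } while (!atomic_try_cmpxchg(pending, &old, old + 1));

        return true;
}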
|
/linux-5.10/drivers/gpu/drm/mediatek/ |
D | mtk_drm_plane.c |
     51  state->pending.format = DRM_FORMAT_RGB565;  in mtk_plane_reset()
     67  state->pending = old_state->pending;  in mtk_plane_duplicate_state()
    125  state->pending.async_dirty = true;  in mtk_plane_atomic_async_update()
    172  state->pending.enable = false;  in mtk_plane_atomic_disable()
    174  state->pending.dirty = true;  in mtk_plane_atomic_disable()
    205  state->pending.enable = true;  in mtk_plane_atomic_update()
    206  state->pending.pitch = pitch;  in mtk_plane_atomic_update()
    207  state->pending.format = format;  in mtk_plane_atomic_update()
    208  state->pending.addr = addr;  in mtk_plane_atomic_update()
    209  state->pending.x = plane->state->dst.x1;  in mtk_plane_atomic_update()
    [all …]
|
D | mtk_disp_ovl.c |
    255  struct mtk_plane_pending_state *pending = &state->pending;  in mtk_ovl_layer_config() local
    256  unsigned int addr = pending->addr;  in mtk_ovl_layer_config()
    257  unsigned int pitch = pending->pitch & 0xffff;  in mtk_ovl_layer_config()
    258  unsigned int fmt = pending->format;  in mtk_ovl_layer_config()
    259  unsigned int offset = (pending->y << 16) | pending->x;  in mtk_ovl_layer_config()
    260  unsigned int src_size = (pending->height << 16) | pending->width;  in mtk_ovl_layer_config()
    263  if (!pending->enable) {  in mtk_ovl_layer_config()
    272  if (pending->rotation & DRM_MODE_REFLECT_Y) {  in mtk_ovl_layer_config()
    274  addr += (pending->height - 1) * pending->pitch;  in mtk_ovl_layer_config()
    277  if (pending->rotation & DRM_MODE_REFLECT_X) {  in mtk_ovl_layer_config()
    [all …]
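Taken together, the two MediaTek entries show a stage-then-apply handshake: the plane's atomic commit path only fills the software 'pending' record and marks it dirty, and the OVL layer code later programs the hardware from that record. A minimal sketch of the idea; the type and field names here are illustrative, not the mtk structures:

#include <linux/types.h>

struct sketch_pending_state {
        bool enable;
        bool dirty;             /* set by the producer, cleared by the consumer */
        u32 addr;
        u32 pitch;
        u32 format;
};

/* Commit side: stage the new configuration; no register access here. */
static void sketch_stage(struct sketch_pending_state *p,
                         u32 addr, u32 pitch, u32 format)
{
        p->addr = addr;
        p->pitch = pitch;
        p->format = format;
        p->enable = true;
        p->dirty = true;
}

/* Consumer side, e.g. on the next config pass. */
static void sketch_apply(struct sketch_pending_state *p)
{
        if (!p->dirty)
                return;
        /* ... write p->addr / p->pitch / p->format to the layer registers ... */
        p->dirty = false;
}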
|
/linux-5.10/arch/mips/sni/ |
D | pcit.c |
    183  u32 pending = *(volatile u32 *)SNI_PCIT_INT_REG;  in pcit_hwint1() local
    187  irq = ffs((pending >> 16) & 0x7f);  in pcit_hwint1()
    196  u32 pending = *(volatile u32 *)SNI_PCIT_INT_REG;  in pcit_hwint0() local
    200  irq = ffs((pending >> 16) & 0x3f);  in pcit_hwint0()
    209  u32 pending = read_c0_cause() & read_c0_status();  in sni_pcit_hwint() local
    211  if (pending & C_IRQ1)  in sni_pcit_hwint()
    213  else if (pending & C_IRQ2)  in sni_pcit_hwint()
    215  else if (pending & C_IRQ3)  in sni_pcit_hwint()
    217  else if (pending & C_IRQ5)  in sni_pcit_hwint()
    223  u32 pending = read_c0_cause() & read_c0_status();  in sni_pcit_hwint_cplus() local
    [all …]
|
/linux-5.10/arch/mips/cobalt/ |
D | irq.c |
     25  unsigned pending = read_c0_status() & read_c0_cause() & ST0_IM;  in plat_irq_dispatch() local
     28  if (pending & CAUSEF_IP2)  in plat_irq_dispatch()
     30  else if (pending & CAUSEF_IP6) {  in plat_irq_dispatch()
     36  } else if (pending & CAUSEF_IP3)  in plat_irq_dispatch()
     38  else if (pending & CAUSEF_IP4)  in plat_irq_dispatch()
     40  else if (pending & CAUSEF_IP5)  in plat_irq_dispatch()
     42  else if (pending & CAUSEF_IP7)  in plat_irq_dispatch()
|
/linux-5.10/Documentation/virt/kvm/devices/ |
D | xics.rst |
     47  * Pending interrupt priority, 8 bits
     48    Zero is the highest priority, 255 means no interrupt is pending.
     50  * Pending IPI (inter-processor interrupt) priority, 8 bits
     51    Zero is the highest priority, 255 means no IPI is pending.
     53  * Pending interrupt source number, 24 bits
     54    Zero means no interrupt pending, 2 means an IPI is pending
     88  * Pending flag, 1 bit
     90    This bit is 1 if the source has a pending interrupt, otherwise 0.
|
/linux-5.10/arch/mips/ralink/ |
D | irq.c |
     99  u32 pending = rt_intc_r32(INTC_REG_STATUS0);  in ralink_intc_irq_handler() local
    101  if (pending) {  in ralink_intc_irq_handler()
    103  generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));  in ralink_intc_irq_handler()
    111  unsigned long pending;  in plat_irq_dispatch() local
    113  pending = read_c0_status() & read_c0_cause() & ST0_IM;  in plat_irq_dispatch()
    115  if (pending & STATUSF_IP7)  in plat_irq_dispatch()
    118  else if (pending & STATUSF_IP5)  in plat_irq_dispatch()
    121  else if (pending & STATUSF_IP6)  in plat_irq_dispatch()
    124  else if (pending & STATUSF_IP4)  in plat_irq_dispatch()
    127  else if (pending & STATUSF_IP2)  in plat_irq_dispatch()
|
/linux-5.10/include/asm-generic/ |
D | qspinlock_types.h |
     20  * pending bit, we can allow better optimization of the lock
     21  * acquisition for the pending bit holder.
     26  u8 pending;  member
     39  u8 pending;  member
     56  * 8: pending
     63  * 8: pending
     79  #define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
|
/linux-5.10/arch/um/os-Linux/ |
D | signal.c |
    155  unsigned long pending = 1UL << sig;  in hard_handler() local
    161  * pending comes back with one bit set for each  in hard_handler()
    170  bail = to_irq_stack(&pending);  in hard_handler()
    174  nested = pending & 1;  in hard_handler()
    175  pending &= ~1;  in hard_handler()
    177  while ((sig = ffs(pending)) != 0){  in hard_handler()
    179  pending &= ~(1 << sig);  in hard_handler()
    184  * Again, pending comes back with a mask of signals  in hard_handler()
    190  pending = from_irq_stack(nested);  in hard_handler()
    191  } while (pending);  in hard_handler()
    [all …]
|
/linux-5.10/drivers/gpu/drm/qxl/ |
D | qxl_irq.c |
     36  uint32_t pending;  in qxl_irq_handler() local
     38  pending = xchg(&qdev->ram_header->int_pending, 0);  in qxl_irq_handler()
     40  if (!pending)  in qxl_irq_handler()
     45  if (pending & QXL_INTERRUPT_DISPLAY) {  in qxl_irq_handler()
     50  if (pending & QXL_INTERRUPT_CURSOR) {  in qxl_irq_handler()
     54  if (pending & QXL_INTERRUPT_IO_CMD) {  in qxl_irq_handler()
     58  if (pending & QXL_INTERRUPT_ERROR) {  in qxl_irq_handler()
     66  if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {  in qxl_irq_handler()
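The handler above uses xchg() to fetch and clear the shared pending word in one atomic step, so bits the device sets after this point are not lost, then handles each flag and claims the interrupt. A sketch of the same shape with made-up names and flags:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/types.h>

#define SKETCH_INT_DISPLAY      BIT(0)  /* placeholder flag bits */
#define SKETCH_INT_ERROR        BIT(1)

struct sketch_shared {
        u32 int_pending;        /* written by the device/host side */
};

static irqreturn_t sketch_irq_handler(int irq, void *arg)
{
        struct sketch_shared *shared = arg;
        u32 pending = xchg(&shared->int_pending, 0);    /* fetch and clear atomically */

        if (!pending)
                return IRQ_NONE;        /* not our interrupt */

        if (pending & SKETCH_INT_DISPLAY)
                ;       /* handle display work, e.g. wake a waitqueue */
        if (pending & SKETCH_INT_ERROR)
                ;       /* log / recover */

        return IRQ_HANDLED;
}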
|
/linux-5.10/kernel/irq/ |
D | migration.c |
      9  * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
     11  * @force_clear: If set clear the move pending bit unconditionally.
     13  * last one in the pending mask.
     15  * Returns true if the pending bit was set and the pending mask contains an
     26  * The outgoing CPU might be the last online target in a pending  in irq_fixup_move_pending()
     27  * interrupt move. If that's the case clear the pending move bit.  in irq_fixup_move_pending()
     82  * If there is a cleanup pending in the underlying  in irq_move_masked_irq()
|
D | irq_sim.c |
     17  unsigned long *pending;  member
     65  *state = test_bit(hwirq, irq_ctx->work_ctx->pending);  in irq_sim_get_irqchip_state()
     83  assign_bit(hwirq, irq_ctx->work_ctx->pending, state);  in irq_sim_set_irqchip_state()
    112  while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {  in irq_sim_handle_irq()
    113  offset = find_next_bit(work_ctx->pending,  in irq_sim_handle_irq()
    115  clear_bit(offset, work_ctx->pending);  in irq_sim_handle_irq()
    177  work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);  in irq_domain_create_sim()
    178  if (!work_ctx->pending)  in irq_domain_create_sim()
    193  bitmap_free(work_ctx->pending);  in irq_domain_create_sim()
    212  bitmap_free(work_ctx->pending);  in irq_domain_remove_sim()
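irq_sim_handle_irq() drains a bitmap of pending lines: bits are set by the "raise" side and consumed here one at a time until the bitmap is empty. A generic sketch of that drain loop; 'fire' is a placeholder for whatever handles one line:

#include <linux/bitmap.h>
#include <linux/bitops.h>

static void sketch_drain_pending(unsigned long *pending, unsigned int count,
                                 void (*fire)(unsigned int offset))
{
        unsigned long offset;

        while (!bitmap_empty(pending, count)) {
                offset = find_next_bit(pending, count, 0);
                clear_bit(offset, pending);     /* consume before handling */
                fire(offset);
        }
}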
|
/linux-5.10/drivers/hid/ |
D | hid-sensor-hub.c |
     23  * @lock: Spin lock to protect pending request structure.
    304  memset(&hsdev->pending, 0, sizeof(hsdev->pending));  in sensor_hub_input_attr_get_raw_value()
    305  init_completion(&hsdev->pending.ready);  in sensor_hub_input_attr_get_raw_value()
    306  hsdev->pending.usage_id = usage_id;  in sensor_hub_input_attr_get_raw_value()
    307  hsdev->pending.attr_usage_id = attr_usage_id;  in sensor_hub_input_attr_get_raw_value()
    308  hsdev->pending.raw_size = 0;  in sensor_hub_input_attr_get_raw_value()
    311  hsdev->pending.status = true;  in sensor_hub_input_attr_get_raw_value()
    319  &hsdev->pending.ready, HZ*5);  in sensor_hub_input_attr_get_raw_value()
    320  switch (hsdev->pending.raw_size) {  in sensor_hub_input_attr_get_raw_value()
    323  ret_val = *(s8 *)hsdev->pending.raw_data;  in sensor_hub_input_attr_get_raw_value()
    [all …]
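The pattern in sensor_hub_input_attr_get_raw_value() is a synchronous read built on a completion: a 'pending' record is armed, the request is sent, and the caller sleeps until the report callback fills in the result or a roughly 5-second timeout expires. A sketch of that shape; the structure and function names here are illustrative, not the hid-sensor-hub ones:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/types.h>

struct sketch_pending_req {
        struct completion ready;        /* signalled by the report/IRQ path */
        bool status;                    /* a request is currently in flight */
        int raw_value;                  /* result, filled in before complete() */
};

struct sketch_dev {
        struct sketch_pending_req pending;
};

/* Placeholder: the bus-specific request would be sent from here. */
static void sketch_send_request(struct sketch_dev *dev)
{
}

static int sketch_read_sync(struct sketch_dev *dev)
{
        memset(&dev->pending, 0, sizeof(dev->pending));
        init_completion(&dev->pending.ready);
        dev->pending.status = true;

        sketch_send_request(dev);

        /* Sleep until the report path completes us, or ~5 s pass. */
        wait_for_completion_interruptible_timeout(&dev->pending.ready, 5 * HZ);

        dev->pending.status = false;
        return dev->pending.raw_value;
}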
|
/linux-5.10/drivers/irqchip/ |
D | irq-loongson-htpic.c |
     33  uint32_t pending;  in htpic_irq_dispatch() local
     36  pending = readl(priv->base);  in htpic_irq_dispatch()
     38  writel(pending, priv->base);  in htpic_irq_dispatch()
     40  if (!pending)  in htpic_irq_dispatch()
     43  while (pending) {  in htpic_irq_dispatch()
     44  int bit = __ffs(pending);  in htpic_irq_dispatch()
     52  pending &= ~BIT(bit);  in htpic_irq_dispatch()
     67  /* Ack all possible pending IRQs */  in htpic_reg_init()
|
D | irq-ath79-cpu.c |
     36  unsigned long pending;  in plat_irq_dispatch() local
     39  pending = read_c0_status() & read_c0_cause() & ST0_IM;  in plat_irq_dispatch()
     41  if (!pending) {  in plat_irq_dispatch()
     46  pending >>= CAUSEB_IP;  in plat_irq_dispatch()
     47  while (pending) {  in plat_irq_dispatch()
     48  irq = fls(pending) - 1;  in plat_irq_dispatch()
     52  pending &= ~BIT(irq);  in plat_irq_dispatch()
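The two dispatch loops above differ only in scan order: the HTPIC driver uses __ffs() (lowest pending bit first), while the ath79 code uses fls() - 1 (highest bit, i.e. highest-priority line, first). A generic sketch of the fls() form; 'handle' is a placeholder:

#include <linux/bitops.h>
#include <linux/bits.h>

static void sketch_dispatch_pending(unsigned int pending,
                                    void (*handle)(unsigned int hwirq))
{
        while (pending) {
                unsigned int irq = fls(pending) - 1;    /* highest set bit */

                handle(irq);
                pending &= ~BIT(irq);                   /* mark it serviced */
        }
}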
|
/linux-5.10/drivers/staging/hikey9xx/ |
D | hi6421-spmi-pmic.c |
    127  unsigned long pending;  in hi6421_spmi_irq_handler() local
    131  pending = hi6421_spmi_pmic_read(pmic, (i + SOC_PMIC_IRQ0_ADDR));  in hi6421_spmi_irq_handler()
    132  pending &= HISI_MASK_FIELD;  in hi6421_spmi_irq_handler()
    133  if (pending != 0)  in hi6421_spmi_irq_handler()
    134  pr_debug("pending[%d]=0x%lx\n\r", i, pending);  in hi6421_spmi_irq_handler()
    136  hi6421_spmi_pmic_write(pmic, (i + SOC_PMIC_IRQ0_ADDR), pending);  in hi6421_spmi_irq_handler()
    140  ((pending & HISI_IRQ_KEY_VALUE) == HISI_IRQ_KEY_VALUE)) {  in hi6421_spmi_irq_handler()
    143  pending &= (~HISI_IRQ_KEY_VALUE);  in hi6421_spmi_irq_handler()
    146  if (pending) {  in hi6421_spmi_irq_handler()
    147  for_each_set_bit(offset, &pending, HISI_BITS)  in hi6421_spmi_irq_handler()
    [all …]
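for_each_set_bit(), used on line 147, is the bitops-helper form of the same fan-out idiom seen in the open-coded ffs loops elsewhere in these results: it visits every set bit of 'pending', lowest first. A sketch; 'handle_bit' is a placeholder:

#include <linux/bitops.h>

static void sketch_fan_out(unsigned long pending, unsigned int nbits,
                           void (*handle_bit)(unsigned int))
{
        unsigned int offset;

        for_each_set_bit(offset, &pending, nbits)
                handle_bit(offset);
}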
|
/linux-5.10/arch/x86/kvm/ |
D | irq.c |
     19  * check if there are pending timer events
     32  * check if there is a pending userspace external interrupt
     40  * check if there is pending interrupt from
     49  * on interrupt.injected to know if there is a pending  in kvm_cpu_has_extint()
     53  * pending interrupt or should re-inject an injected  in kvm_cpu_has_extint()
     87  * check if there is pending interrupt without
    100  * Read pending interrupt(from non-APIC source)
    123  * Read pending interrupt vector and intack.
|
/linux-5.10/net/vmw_vsock/ |
D | vmci_transport.c |
     43  struct sock *pending,
    482  struct sock *pending;  in vmci_transport_get_pending() local
    493  pending = sk_vsock(vpending);  in vmci_transport_get_pending()
    494  sock_hold(pending);  in vmci_transport_get_pending()
    499  pending = NULL;  in vmci_transport_get_pending()
    501  return pending;  in vmci_transport_get_pending()
    505  static void vmci_transport_release_pending(struct sock *pending)  in vmci_transport_release_pending() argument
    507  sock_put(pending);  in vmci_transport_release_pending()
    910  /* Processing of pending connections for servers goes through  in vmci_transport_recv_pkt_work()
    941  struct sock *pending;  in vmci_transport_recv_listen() local
    [all …]
|
/linux-5.10/kernel/locking/ |
D | qspinlock.c |
     46  * unlock the next pending (next->locked), we compress both these: {tail,
     89  * The pending bit spinning loop count.
    143  * clear_pending - clear the pending bit.
    150  WRITE_ONCE(lock->pending, 0);  in clear_pending()
    154  * clear_pending_set_locked - take ownership and clear the pending bit.
    189  * clear_pending - clear the pending bit.
    200  * clear_pending_set_locked - take ownership and clear the pending bit.
    242  * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
    299  * (queue tail, pending bit, lock value)
    306  * pending : (0,1,1) +--> (0,1,0) \ | :
    [all …]
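Lines 299 and 306 are fragments of the state diagram that tracks the lock word as a (queue tail, pending bit, locked byte) triple. To make that encoding concrete, here is a tiny user-space model that decodes the triple from a 32-bit value, using the bit positions from qspinlock_types.h for the common small-NR_CPUS layout (locked byte in bits 0-7, pending at bit 8, tail in the upper half); the locking algorithm itself is not modelled here.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_LOCKED_MASK   0x000000ffu
#define SKETCH_PENDING_MASK  0x00000100u
#define SKETCH_TAIL_MASK     0xffff0000u

static void describe(uint32_t val)
{
        printf("(tail=%u, pending=%u, locked=%u)\n",
               (unsigned int)((val & SKETCH_TAIL_MASK) >> 16),
               (unsigned int)!!(val & SKETCH_PENDING_MASK),
               (unsigned int)(val & SKETCH_LOCKED_MASK));
}

int main(void)
{
        describe(0x00000001);   /* (0,0,1): uncontended, lock held          */
        describe(0x00000101);   /* (0,1,1): holder plus one pending waiter  */
        describe(0x00000100);   /* (0,1,0): released, pending waiter may take it */
        return 0;
}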
|
/linux-5.10/arch/mips/loongson32/common/ |
D | irq.c |
    119  /* Get pending sources, masked by current enables */  in ls1x_irq_dispatch()
    131  unsigned int pending;  in plat_irq_dispatch() local
    133  pending = read_c0_cause() & read_c0_status() & ST0_IM;  in plat_irq_dispatch()
    135  if (pending & CAUSEF_IP7)  in plat_irq_dispatch()
    137  else if (pending & CAUSEF_IP2)  in plat_irq_dispatch()
    139  else if (pending & CAUSEF_IP3)  in plat_irq_dispatch()
    141  else if (pending & CAUSEF_IP4)  in plat_irq_dispatch()
    143  else if (pending & CAUSEF_IP5)  in plat_irq_dispatch()
    145  else if (pending & CAUSEF_IP6)  in plat_irq_dispatch()
    156  /* Disable interrupts and clear pending,  in ls1x_irq_init()
|
/linux-5.10/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_sp.h |
     31  /* Wait until all pending commands complete */
     41  * pending commands list.
     44  /* If there is another pending ramrod, wait until it finishes and
     88  int state;  /* "ramrod is pending" state bit */
    193  /* Commands pending for an execution. */
    196  /* Commands pending for a completion. */
    216  * Called before removing pending commands, cleaning allocated
    222  * This will try to cancel the current pending commands list
    303  bool head_exe_request;  /* Pending execution request. */
    304  unsigned long saved_ramrod_flags;  /* Ramrods of pending execution */
    [all …]
|