/linux/drivers/accel/rocket/

rocket_drv.c
   88  rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);  in rocket_open()
  174  rdev->cores[core].rdev = rdev;  in rocket_probe()
  175  rdev->cores[core].dev = &pdev->dev;  in rocket_probe()
  176  rdev->cores[core].index = core;  in rocket_probe()
  180  return rocket_core_init(&rdev->cores[core]);  in rocket_probe()
  188  if (rdev->cores[core].dev == dev) {  in rocket_remove()
  189  rocket_core_fini(&rdev->cores[core]);  in rocket_remove()
  213  if (dev == rdev->cores[core].dev)  in find_core_for_dev()
  229  err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);  in rocket_device_runtime_resume()
  246  if (!rocket_job_is_idle(&rdev->cores[core]))  in rocket_device_runtime_suspend()
  [all …]

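The hits in rocket_device_runtime_resume() show a per-core clock-bulk enable on runtime resume. A minimal sketch of that pattern follows; the struct layout and demo_* names are assumptions for illustration, not the driver's actual definitions:

    #include <linux/clk.h>
    #include <linux/kernel.h>

    struct demo_core {
            struct clk_bulk_data clks[2];   /* hypothetical: two clocks per core */
    };

    struct demo_device {
            struct demo_core *cores;
            unsigned int num_cores;
    };

    /* Enable every core's clocks when the device runtime-resumes. */
    static int demo_runtime_resume(struct demo_device *ddev)
    {
            unsigned int core;
            int err;

            for (core = 0; core < ddev->num_cores; core++) {
                    err = clk_bulk_prepare_enable(ARRAY_SIZE(ddev->cores[core].clks),
                                                  ddev->cores[core].clks);
                    if (err)
                            return err;
            }
            return 0;
    }
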
rocket_device.c
   34  rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);  in rocket_device_init()
   35  if (!rdev->cores)  in rocket_device_init()

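rocket_device_init() sizes its per-core array with a device-managed allocation: devm_kcalloc() ties the array's lifetime to the struct device, so the driver never frees it explicitly. A hedged sketch of the idiom (names are illustrative):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct demo_core { int index; };        /* stand-in per-core state */

    struct demo_device {
            struct demo_core *cores;
            unsigned int num_cores;
    };

    static int demo_device_init(struct device *dev, struct demo_device *ddev,
                                unsigned int num_cores)
    {
            /* Zeroed array, freed automatically on driver detach. */
            ddev->cores = devm_kcalloc(dev, num_cores, sizeof(*ddev->cores),
                                       GFP_KERNEL);
            if (!ddev->cores)
                    return -ENOMEM;
            ddev->num_cores = num_cores;
            return 0;
    }
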
/linux/Documentation/admin-guide/

lockup-watchdogs.rst
   67  By default, the watchdog runs on all online cores. However, on a
   69  on the housekeeping cores, not the cores specified in the "nohz_full"
   71  the "nohz_full" cores, we would have to run timer ticks to activate
   73  from protecting the user code on those cores from the kernel.
   74  Of course, disabling it by default on the nohz_full cores means that
   75  when those cores do enter the kernel, by default we will not be
   77  to continue to run on the housekeeping (non-tickless) cores means
   78  that we will continue to detect lockups properly on those cores.
   80  In either case, the set of cores excluded from running the watchdog
   82  nohz_full cores, this may be useful for debugging a case where the
  [all …]

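This excerpt is about which cores run the lockup watchdog; the full document exposes the exclusion set through the kernel.watchdog_cpumask sysctl. As a hedged illustration (assuming cpulist syntax, which /proc/sys/kernel/watchdog_cpumask accepts), a small userspace helper could pin the watchdog to the housekeeping cores:

    #include <stdio.h>

    int main(void)
    {
            /* Run the watchdog only on CPUs 0-3 (the housekeeping set here). */
            FILE *f = fopen("/proc/sys/kernel/watchdog_cpumask", "w");

            if (!f)
                    return 1;
            fputs("0-3", f);
            return fclose(f) != 0;
    }
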
/linux/drivers/remoteproc/

ti_k3_r5_remoteproc.c
  101  struct list_head cores;  member
  186  list_for_each_entry(core, &cluster->cores, elem) {  in k3_r5_lockstep_reset()
  197  list_for_each_entry(core, &cluster->cores, elem) {  in k3_r5_lockstep_reset()
  211  list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {  in k3_r5_lockstep_reset()
  217  core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);  in k3_r5_lockstep_reset()
  219  list_for_each_entry_from_reverse(core, &cluster->cores, elem) {  in k3_r5_lockstep_reset()
  234  list_for_each_entry_reverse(core, &cluster->cores, elem) {  in k3_r5_lockstep_release()
  247  list_for_each_entry_reverse(core, &cluster->cores, elem) {  in k3_r5_lockstep_release()
  259  list_for_each_entry_continue(core, &cluster->cores, elem) {  in k3_r5_lockstep_release()
  263  core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);  in k3_r5_lockstep_release()
  [all …]

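The reset path walks the cluster's core list and, on failure, unwinds in reverse over the cores already handled. A minimal sketch of that traversal idiom using the generic <linux/list.h> macros; the demo_* names and stub operations are assumptions, not the driver's code:

    #include <linux/list.h>

    struct demo_core {
            struct list_head elem;
    };

    static int demo_reset_one(struct demo_core *core) { return 0; }   /* stub */
    static void demo_release_one(struct demo_core *core) { }          /* stub */

    static int demo_reset_all(struct list_head *cores)
    {
            struct demo_core *core;
            int ret;

            list_for_each_entry(core, cores, elem) {
                    ret = demo_reset_one(core);
                    if (ret)
                            goto unroll;
            }
            return 0;

    unroll:
            /* Undo, in reverse order, only the cores reset before the failure. */
            list_for_each_entry_continue_reverse(core, cores, elem)
                    demo_release_one(core);
            return ret;
    }
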
/linux/drivers/gpu/drm/nouveau/dispnv50/

core.c
   44  } cores[] = {  in nv50_core_new()  local
   67  cid = nvif_mclass(&disp->disp->object, cores);  in nv50_core_new()
   73  return cores[cid].new(drm, cores[cid].oclass, pcore);  in nv50_core_new()

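nv50_core_new() keeps a table of display-core classes and their constructors; nvif_mclass() returns the index of the first class the hardware supports, and that entry's constructor is called. A hedged sketch of the table-dispatch shape, where demo_match_class() stands in for nvif_mclass() and the class IDs are hypothetical:

    #include <linux/errno.h>
    #include <linux/kernel.h>

    struct demo_core_desc {
            int oclass;                     /* hardware class ID */
            int (*new)(void *drm, int oclass, void **pcore);
    };

    static int demo_new_a(void *drm, int oclass, void **pcore) { return 0; }
    static int demo_new_b(void *drm, int oclass, void **pcore) { return 0; }

    /* Stand-in for nvif_mclass(): index of the first supported class. */
    static int demo_match_class(const struct demo_core_desc *descs, int n)
    {
            return n > 0 ? 0 : -ENODEV;
    }

    static int demo_core_new(void *drm, void **pcore)
    {
            static const struct demo_core_desc cores[] = {
                    { 0x947d, demo_new_a },         /* hypothetical IDs */
                    { 0x827d, demo_new_b },
            };
            int cid = demo_match_class(cores, ARRAY_SIZE(cores));

            if (cid < 0)
                    return cid;
            return cores[cid].new(drm, cores[cid].oclass, pcore);
    }
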
/linux/Documentation/networking/device_drivers/can/freescale/

flexcan.rst
   13  For most flexcan IP cores the driver supports 2 RX modes:
   18  The older flexcan cores (integrated into the i.MX25, i.MX28, i.MX35
   28  cores come up in a mode where RTR reception is possible.
   39  On some IP cores the controller cannot receive RTR frames in the
   45  Waive ability to receive RTR frames. (not supported on all IP cores)
   48  some IP cores RTR frames cannot be received anymore.

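The excerpt concerns whether a given flexcan core can receive RTR (remote transmission request) frames, which depends on the core and RX mode as described above. From userspace an RTR frame is simply a CAN frame with CAN_RTR_FLAG set; a small SocketCAN receiver that flags them (the interface name "can0" is an assumption):

    #include <linux/can.h>
    #include <linux/can/raw.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_can addr = { .can_family = AF_CAN };
            struct ifreq ifr = { 0 };
            struct can_frame frame;
            int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

            if (s < 0)
                    return 1;
            strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name) - 1);
            if (ioctl(s, SIOCGIFINDEX, &ifr) < 0)
                    return 1;
            addr.can_ifindex = ifr.ifr_ifindex;
            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    return 1;

            while (read(s, &frame, sizeof(frame)) == sizeof(frame)) {
                    if (frame.can_id & CAN_RTR_FLAG)
                            printf("RTR frame, id 0x%03x\n",
                                   frame.can_id & CAN_SFF_MASK);
            }
            return close(s) != 0;
    }
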
/linux/Documentation/devicetree/bindings/media/xilinx/

video.txt
    1  DT bindings for Xilinx video IP cores
    4  Xilinx video IP cores process video streams by acting as video sinks and/or
   10  cores are represented as defined in ../video-interfaces.txt.
   18  The following properties are common to all Xilinx video IP cores.
   21  AXI bus between video IP cores, using its VF code as defined in "AXI4-Stream

/linux/drivers/net/can/esd/

esd_402_pci-core.c
   50  struct acc_core *cores;  member
  103  irq_status = acc_card_interrupt(&card->ov, card->cores);  in pci402_interrupt()
  195  card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores,  in pci402_init_card()
  197  if (!card->cores)  in pci402_init_card()
  286  acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf);  in pci402_init_dma()
  316  struct acc_core *core = &card->cores[i];  in pci402_finish_dma()
  343  struct acc_core *core = &card->cores[i];  in pci402_init_cores()
  404  pci402_unregister_core(&card->cores[i]);  in pci402_init_cores()
  415  pci402_unregister_core(&card->cores[i]);  in pci402_finish_cores()

/linux/Documentation/devicetree/bindings/bus/

brcm,bus-axi.txt
    9  The cores on the AXI bus are automatically detected by bcma with the
   12  BCM47xx/BCM53xx ARM SoCs. To assign IRQ numbers to the cores, provide
   17  The top-level axi bus may contain children representing attached cores
   19  detected (e.g. IRQ numbers). Also some of the cores may be responsible

/linux/Documentation/arch/x86/

amd-hfi.rst
   16  architectural class and CPUs are comprised of cores of various efficiency and
   17  power capabilities: performance-oriented *classic cores* and power-efficient
   18  *dense cores*. As such, power management strategies must be designed to
   26  sending background threads to the dense cores while sending high priority
   27  threads to the classic cores. From a performance perspective, sending
   28  background threads to dense cores can free up power headroom and allow the
   29  classic cores to optimally service demanding threads. Furthermore, the area
   30  optimized nature of the dense cores allows for an increasing number of
   31  physical cores. This improved core density will have positive multithreaded

/linux/arch/x86/kernel/cpu/

topology_common.c
   81  unsigned int cores, core_shift, smt_shift = 0;  in parse_legacy()  local
   84  cores = parse_num_cores_legacy(c);  in parse_legacy()
   85  core_shift = get_count_order(cores);  in parse_legacy()
   96  cores <<= smt_shift;  in parse_legacy()
  100  topology_set_dom(tscan, TOPO_CORE_DOMAIN, core_shift, cores);  in parse_legacy()

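parse_legacy() derives the APIC-ID field width for the core level: get_count_order() rounds the core count up to a power of two and returns the number of bits needed, and the count is then widened by the SMT shift so the core domain is sized in logical CPUs. A small worked sketch of that arithmetic (the values in the comments are examples only):

    #include <linux/bitops.h>
    #include <linux/printk.h>

    static void demo_core_domain(unsigned int cores, unsigned int smt_shift)
    {
            /* Bits needed to address a core: get_count_order(6) == 3. */
            unsigned int core_shift = get_count_order(cores);

            /* Size the domain in logical CPUs: 6 cores * 2 threads = 12. */
            cores <<= smt_shift;

            pr_info("core_shift=%u cpus=%u\n", core_shift, cores);
    }
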
/linux/arch/riscv/

Kconfig.errata
    9  here if your platform uses Andes CPU cores.
   20  non-standard handling on non-coherent operations on Andes cores.
   30  here if your platform uses MIPS CPU cores.
   53  here if your platform uses SiFive CPU cores.
  104  here if your platform uses T-HEAD CPU cores.
  137  The T-Head C9xx cores implement a PMU overflow extension very
  150  The T-Head C9xx cores have a vulnerability in the xtheadvector

/linux/arch/x86/mm/

amdtopology.c
   58  unsigned int numnodes, cores, apicid;  in amd_numa_init()  local
  163  cores = topology_get_domain_size(TOPO_CORE_DOMAIN);  in amd_numa_init()
  170  for (j = 0; j < cores; j++, apicid++)  in amd_numa_init()

/linux/Documentation/ABI/testing/

sysfs-bus-platform-drivers-amd_x3d_vcache
    9  - "frequency" cores within the faster CCD are prioritized before
   11  - "cache" cores within the larger L3 CCD are prioritized before

sysfs-bus-bcma
   14  There are a few types of BCMA cores, they can be identified by
   22  BCMA cores of the same type can still slightly differ depending

/linux/drivers/net/ethernet/airoha/

airoha_npu.c
  162  spin_lock_bh(&npu->cores[core].lock);  in airoha_npu_send_msg()
  178  spin_unlock_bh(&npu->cores[core].lock);  in airoha_npu_send_msg()
  260  c = core - &npu->cores[0];  in airoha_npu_wdt_work()
  272  int c = core - &npu->cores[0];  in airoha_npu_wdt_handler()
  662  for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {  in airoha_npu_probe()
  663  struct airoha_npu_core *core = &npu->cores[i];  in airoha_npu_probe()
  682  irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);  in airoha_npu_probe()
  724  for (i = 0; i < ARRAY_SIZE(npu->cores); i++)  in airoha_npu_remove()
  725  cancel_work_sync(&npu->cores[i].wdt_work);  in airoha_npu_remove()

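The watchdog handlers recover the core index by subtracting the array base from the per-core element pointer (c = core - &npu->cores[0]). A sketch of that idiom, with the element pointer first recovered from the embedded work_struct via container_of(); the struct names are illustrative:

    #include <linux/container_of.h>
    #include <linux/printk.h>
    #include <linux/workqueue.h>

    #define DEMO_NUM_CORES 2

    struct demo_core {
            struct demo_npu *npu;           /* back-pointer to the parent */
            struct work_struct wdt_work;
    };

    struct demo_npu {
            struct demo_core cores[DEMO_NUM_CORES];
    };

    static void demo_wdt_work(struct work_struct *work)
    {
            struct demo_core *core = container_of(work, struct demo_core,
                                                  wdt_work);
            /* Pointer arithmetic on the array recovers the core number. */
            long c = core - &core->npu->cores[0];

            pr_info("watchdog fired on core %ld\n", c);
    }
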
/linux/drivers/bcma/

main.c
   92  list_for_each_entry(core, &bus->cores, list) {  in bcma_find_core_unit()
  272  INIT_LIST_HEAD(&bus->cores);  in bcma_init_bus()
  296  list_for_each_entry(core, &bus->cores, list) {  in bcma_register_devices()
  372  list_for_each_entry_safe(core, tmp, &bus->cores, list) {  in bcma_unregister_cores()
  382  list_for_each_entry_safe(core, tmp, &bus->cores, list) {  in bcma_unregister_cores()
  418  list_for_each_entry(core, &bus->cores, list) {  in bcma_bus_register()
  543  list_for_each_entry(core, &bus->cores, list) {  in bcma_bus_suspend()
  564  list_for_each_entry(core, &bus->cores, list) {  in bcma_bus_resume()

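bcma keeps the detected cores on a linked list headed at bus->cores, and bcma_find_core_unit() is a linear lookup over it. A hedged sketch of that lookup (field names loosely follow the bcma structs but are illustrative):

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_core {
            u16 id;                 /* core type */
            u8 unit;                /* instance among cores of the same type */
            struct list_head list;
    };

    struct demo_bus {
            struct list_head cores;
    };

    static struct demo_core *demo_find_core_unit(struct demo_bus *bus,
                                                 u16 id, u8 unit)
    {
            struct demo_core *core;

            list_for_each_entry(core, &bus->cores, list) {
                    if (core->id == id && core->unit == unit)
                            return core;
            }
            return NULL;
    }

The _safe variant seen in bcma_unregister_cores() is the matching idiom for teardown, since entries are unlinked while the list is being walked.
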
/linux/sound/soc/sof/

ipc4-mtrace.c
  113  struct sof_mtrace_core_data cores[];  member
  403  debugfs_create_file(dfs_name, 0444, dfs_root, &priv->cores[i],  in mtrace_debugfs_create()
  483  struct sof_mtrace_core_data *core_data = &priv->cores[i];  in ipc4_mtrace_disable()
  519  core_data = &priv->cores[core];  in sof_mtrace_find_core_slots()
  556  priv = devm_kzalloc(sdev->dev, struct_size(priv, cores, sdev->num_cores),  in ipc4_mtrace_init()
  570  struct sof_mtrace_core_data *core_data = &priv->cores[i];  in ipc4_mtrace_init()
  625  core_data = &priv->cores[core];  in sof_ipc4_mtrace_update_pos()

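Here cores[] is a flexible array member sized at allocation time: struct_size() computes sizeof(*priv) plus num_cores trailing elements with overflow checking. A minimal sketch of that allocation pattern (names are illustrative):

    #include <linux/device.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_core_data {
            int id;
    };

    struct demo_priv {
            unsigned int num_cores;
            struct demo_core_data cores[];  /* one trailing slot per core */
    };

    static struct demo_priv *demo_priv_alloc(struct device *dev,
                                             unsigned int num_cores)
    {
            /* Header plus num_cores elements, overflow-checked. */
            struct demo_priv *priv =
                    devm_kzalloc(dev, struct_size(priv, cores, num_cores),
                                 GFP_KERNEL);

            if (priv)
                    priv->num_cores = num_cores;
            return priv;
    }
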
/linux/drivers/gpu/drm/v3d/

v3d_irq.c
  256  for (core = 0; core < v3d->cores; core++)  in v3d_irq_init()
  312  for (core = 0; core < v3d->cores; core++) {  in v3d_irq_enable()
  327  for (core = 0; core < v3d->cores; core++)  in v3d_irq_disable()
  338  for (core = 0; core < v3d->cores; core++)  in v3d_irq_disable()

/linux/arch/arm/boot/dts/arm/

vexpress-v2p-ca15-tc1.dts
  199  regulator-cores {
  210  amp-cores {
  211  /* Total current for the two cores */
  224  power-cores {

/linux/arch/arm64/boot/dts/qcom/

sdm632.dtsi
   45  * cpu0-3 are efficiency cores, cpu4-7 are performance cores

/linux/Documentation/devicetree/bindings/

xilinx.txt
    1  d) Xilinx IP cores
    3  The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
   14  device drivers how the IP cores are configured, but it requires the kernel
   20  properties of the device node. In general, device nodes for IP-cores
   89  That covers the general approach to binding xilinx IP cores into the

/linux/Documentation/admin-guide/device-mapper/

unstriped.rst
   85  Intel NVMe drives contain two cores on the physical device.
   88  in a 256k stripe across the two cores::
  100  are striped across the two cores. When we unstripe this hardware RAID 0
  113  unstriped on top of Intel NVMe device that has 2 cores

/linux/tools/power/cpupower/lib/

cpupower.c
  177  cpu_top->pkgs = cpu_top->cores = 0;  in get_cpu_topology()
  218  cpu_top->cores = 1;  in get_cpu_topology()
  223  cpu_top->cores++;  in get_cpu_topology()

/linux/Documentation/locking/

percpu-rw-semaphore.rst
    9  cores take the lock for reading, the cache line containing the semaphore
   10  is bouncing between L1 caches of the cores, causing performance

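The document's point is that a conventional rwsem's shared counter bounces between per-core caches under read-mostly load, while a percpu-rwsem gives readers a per-CPU fast path. A minimal usage sketch of the in-kernel API:

    #include <linux/percpu-rwsem.h>

    static DEFINE_STATIC_PERCPU_RWSEM(demo_sem);

    static void demo_reader(void)
    {
            percpu_down_read(&demo_sem);
            /* Read side: per-CPU counters, no shared cache-line bounce. */
            percpu_up_read(&demo_sem);
    }

    static void demo_writer(void)
    {
            percpu_down_write(&demo_sem);
            /* Write side: expensive, waits for all readers to drain. */
            percpu_up_write(&demo_sem);
    }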