Lines matching "mipi-to-edp"

1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
8 * Permission is hereby granted, free of charge, to any person obtaining a
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
38 #include <linux/io-mapping.h>
40 #include <linux/i2c-algo-bit.h>
43 #include <linux/intel-iommu.h>
48 #include <linux/dma-resv.h>
53 #include <drm/intel-gtt.h>
120 HPD_TV = HPD_NONE, /* TV is known to be unreliable */
171 /* Whether or not to count short HPD IRQs in HPD storms */
175 * if we get a HPD irq from DP and a HPD irq from non-DP
176 * the non-DP HPD could block the workqueue on a mode config
178 * userspace is waiting on the DP workqueue to run which is
179 * blocked behind the non-DP one.
212 * will fail. This is a stopgap measure to limit the badly behaving
213 * clients' access to the gpu. Note that unbannable contexts never increment
232 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
233 * - Support vertical blank on secondary display pipe
284 * fills out the pipe-config with the hw state. */
350 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
353 large Last-Level-Cache. LLC is coherent with
354 the CPU, but L3 is only visible to the GPU. */
358 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
382 * Due to the atomic rules we can't access some structures without the
383 * appropriate locking, so we cache information here in order to avoid
400 * to a tile, offsets within a tile are handled in
422 * This structure contains everything that's relevant to program the
423 * hardware registers. When we want to figure out if we need to disable
424 * and re-enable FBC for a new configuration we just check if there's
426 * are supposed to read from it in order to program the registers.
452 * HIGH_RR is the highest eDP panel refresh rate read from EDID
453 * LOW_RR is the lowest eDP panel refresh rate found from EDID
578 * Count of objects pending destruction. Used to skip needlessly
579 * waiting on an RCU barrier if no objects are waiting to be freed.
595 * Workqueue to fault in userptr pages, flushed by the execbuf
596 * when required but otherwise left to userspace to try again
621 /* Non-NULL if port present. */
681 } edp; member
703 /* MIPI DSI */
777 return entry->end - entry->start; in skl_ddb_entry_size()
783 if (e1->start == e2->start && e1->end == e2->end) in skl_ddb_entry_equal()
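For context, the two hits above come from small inline DDB helpers; a minimal sketch of how they likely read in full (the skl_ddb_entry layout with u16 start/end fields is an assumption, not part of the matches):

/* Sketch of the DDB-entry helpers the matches above belong to; not a verbatim quote. */
struct skl_ddb_entry {
	u16 start, end;	/* in number of blocks, 'end' is assumed exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	return e1->start == e2->start && e1->end == e2->end;
}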
793 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
818 /* FIXME: Device release actions should all be moved to drmm_ */
824 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
825 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
829 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
830 * end of stolen which we can optionally use to create GEM objects
832 * exactly how much of this we are actually allowed to use, given that
843 * off-limits to certain functions.
845 * The drm_mm is initialised to the total accessible range, as found
846 * from the PCI config. On Broadwell+, this is further restricted to
877 /* MMIO base address for MIPI regs */
895 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
902 /** Cached value of IMR to avoid reads in updating the bitfield */
953 * wq - Driver workqueue for GEM.
955 * NOTE: Work items scheduled here are not allowed to grab any modeset
1032 * will use this later to figure out which PLLs and PHYs are unavailable
1062 * av_mutex - mutex for audio/video sync
1074 * Shadows for CHV DPLL_MD regs to keep the state
1125 * protects intel_crtc->wm.active and
1126 * crtc_state->wm.need_postvbl_update.
1132 * need to know when we're still using BIOS-provided values
1184 * on the struct file, and we do not want to prolong the
1185 * lifetime of the local fd. To minimise the number of
1186 * anonymous inodes we create, we use a global singleton to
1202 /* Used to save the pipe-to-encoder mapping for audio */
1216 /* Mutex to protect the above hdcp component related values. */
1247 for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
1251 for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
1253 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
1260 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
1262 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
1266 (engine__) && (engine__)->uabi_class == (class__); \
1267 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
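A hedged usage sketch for the uABI engine iterator above; the wrapper function name list_uabi_engines is hypothetical, and the engine->name field is assumed from the rest of the driver:

/* Illustrative only: walk the uABI-ordered engine list and print each name. */
static void list_uabi_engines(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915)
		pr_info("uabi engine: %s\n", engine->name);
}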
1269 #define I915_GTT_OFFSET_NONE ((u32)-1)
1272 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
1273 * considered to be the frontbuffer for the given plane interface-wise. This
1286 BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
1288 GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
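A worked expansion of the per-pipe frontbuffer macros, assuming INTEL_FRONTBUFFER_BITS_PER_PIPE is 8 (the value itself is not among the matches):

/* Assuming INTEL_FRONTBUFFER_BITS_PER_PIPE == 8:
 *   pipe A (0): overlay bit = BIT(7),  all-planes mask = GENMASK(7, 0)
 *   pipe B (1): overlay bit = BIT(15), all-planes mask = GENMASK(15, 8)
 * i.e. each pipe owns one contiguous byte of obj->frontbuffer_bits, with the
 * highest bit of that byte reserved for the legacy overlay.
 */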
1291 #define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
1292 #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
1293 #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
1295 #define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
1296 #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
1299 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
1304 GENMASK((e) - 1, (s) - 1))
1308 (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
1312 INTEL_INFO(dev_priv)->gen == (n))
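A worked expansion of the gen-mask range check above; the macro names INTEL_GEN_MASK and IS_GEN_RANGE are assumed, since their defining lines are not among the matches:

/* Gen N is recorded as bit N-1 of gen_mask, so a range check is one mask test:
 *
 *   IS_GEN_RANGE(dev_priv, 9, 11)
 *     -> !!(INTEL_INFO(dev_priv)->gen_mask & GENMASK(10, 8))
 *
 * which is true for gen9, gen10 and gen11 parts and false otherwise.
 */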
1314 #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
1319 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
1329 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; in __platform_mask_index()
1333 pbits * ARRAY_SIZE(info->platform_mask)); in __platform_mask_index()
1343 BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS; in __platform_mask_bit()
1353 return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; in intel_subplatform()
1365 return info->platform_mask[pi] & BIT(pb); in IS_PLATFORM()
1375 const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1; in IS_SUBPLATFORM()
1376 const u32 mask = info->platform_mask[pi]; in IS_SUBPLATFORM()
1383 return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb); in IS_SUBPLATFORM()
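The final return statement above is a branch-free check that two bits are both set; an equivalent, more readable formulation (illustrative only):

/* Shifting 'mask' so that the platform bit 'pb' lands at the MSB, and again so
 * that the subplatform bit 's' lands at the MSB, then ANDing and keeping only
 * the MSB, is non-zero iff both bits are set, i.e. it behaves like:
 *
 *   return (mask & BIT(pb)) && (mask & BIT(s));
 */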
1386 #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
1387 #define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
1409 INTEL_INFO(dev_priv)->gt == 1)
1433 INTEL_INFO(dev_priv)->gt == 3)
1437 INTEL_INFO(dev_priv)->gt == 3)
1439 INTEL_INFO(dev_priv)->gt == 1)
1452 INTEL_INFO(dev_priv)->gt == 2)
1454 INTEL_INFO(dev_priv)->gt == 3)
1456 INTEL_INFO(dev_priv)->gt == 4)
1458 INTEL_INFO(dev_priv)->gt == 2)
1460 INTEL_INFO(dev_priv)->gt == 3)
1466 INTEL_INFO(dev_priv)->gt == 2)
1468 INTEL_INFO(dev_priv)->gt == 3)
1475 INTEL_INFO(dev_priv)->gt == 2)
1588 tgl_revids_get(p)->disp_stepping >= (since) && \
1589 tgl_revids_get(p)->disp_stepping <= (until))
1593 tgl_uy_revids->gt_stepping >= (since) && \
1594 tgl_uy_revids->gt_stepping <= (until))
1599 tgl_revids->gt_stepping >= (since) && \
1600 tgl_revids->gt_stepping <= (until))
1615 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
1620 #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
1625 ((gt)->info.engine_mask & \
1626 GENMASK(first__ + count__ - 1, first__)) >> first__; \
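An illustrative expansion of the engine-instance mask helper above; VCS0 as the first engine id and a count of 4 are assumed example inputs taken from elsewhere in the driver:

/* The helper picks a contiguous run of instance bits out of
 * gt->info.engine_mask and shifts it down to bit 0.  E.g. with
 * first = VCS0 and count = 4, a part exposing only VCS0 and VCS2
 * yields 0b0101.
 */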
1634 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
1639 #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
1640 #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
1641 #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
1646 #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
1649 (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
1651 (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
1653 (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
1655 #define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
1659 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
1667 ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
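The fragment above is the body of the page-size capability check; a hedged sketch of the likely surrounding macro (the name HAS_PAGE_SIZES and the GEM_BUG_ON guard are assumptions based on the fragment, not verbatim):

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})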
1670 #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
1672 (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
1691 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1697 #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
1698 #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
1701 #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
1706 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
1708 #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
1709 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
1710 #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
1712 (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
1714 #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) …
1716 #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
1717 #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
1720 #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
1722 #define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
1724 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
1725 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
1727 #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
1729 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
1732 #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
1734 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
1736 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
1739 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
1744 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
1751 #define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
1753 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
1757 (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
1806 * A single pass should suffice to release all the freed objects (along in i915_gem_drain_freed_objects()
1812 while (atomic_read(&i915->mm.free_count)) { in i915_gem_drain_freed_objects()
1813 flush_work(&i915->mm.free_work); in i915_gem_drain_freed_objects()
1821 * Similar to objects above (see i915_gem_drain_freed_objects), in in i915_gem_drain_workqueue()
1823 * themselves in their callbacks. To be paranoid, we need to in i915_gem_drain_workqueue()
1828 * than 3 passes to catch all _recursive_ RCU delayed work. in i915_gem_drain_workqueue()
1833 flush_workqueue(i915->wq); in i915_gem_drain_workqueue()
1836 } while (--pass); in i915_gem_drain_workqueue()
1837 drain_workqueue(i915->wq); in i915_gem_drain_workqueue()
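Assembled from the fragments above, the two drain helpers likely read roughly as follows (a sketch, not a verbatim quote of the header):

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/* Keep flushing until no freed objects remain pending. */
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		rcu_barrier();
	}
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	int pass = 3;	/* RCU-armed workers may re-arm themselves */

	do {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	} while (--pass);

	drain_workqueue(i915->wq);
}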
1870 return atomic_read(&error->reset_count); in i915_reset_count()
1876 return atomic_read(&error->reset_engine_count[engine->uabi_class]); in i915_reset_engine_count()
1901 return xa_load(&file_priv->context_xa, id); in __i915_gem_context_lookup_rcu()
1911 if (ctx && !kref_get_unless_zero(&ctx->ref)) in i915_gem_context_lookup()
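A hedged reconstruction of the reference-taking lookup the two hits above belong to, consistent with the xa_load and kref_get_unless_zero fragments (a sketch, not verbatim):

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;	/* context is being destroyed, treat as absent */
	rcu_read_unlock();

	return ctx;
}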
1937 struct drm_i915_private *i915 = to_i915(obj->base.dev); in i915_gem_object_needs_bit17_swizzle()
1939 return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && in i915_gem_object_needs_bit17_swizzle()
1973 intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
1980 /* These are untraced mmio-accessors that are only valid to be used inside
1988 * spin_lock_irq(&dev_priv->uncore.lock);
1994 * spin_unlock_irq(&dev_priv->uncore.lock);
2002 * by different clients (e.g. on Ivybridge). Access to registers should
2003 * therefore generally be serialised, by either the dev_priv->uncore.lock or
2004 * a more localised lock guarding all access to that bank of registers.
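A sketch of the locked usage pattern the comment above describes; the forcewake helper names and the FORCEWAKE_ALL domain are assumptions about this kernel version:

/* Untraced accessors are only safe between an irq-off uncore lock and an
 * explicit forcewake grab, roughly:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *
 *	... raw, untraced register reads/writes here ...
 *
 *	intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */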
2033 return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz, in i915_cs_timestamp_ns_to_ticks()
2040 RUNTIME_INFO(i915)->cs_timestamp_frequency_hz); in i915_cs_timestamp_ticks_to_ns()
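A hedged sketch of the two conversion helpers these fragments come from; NSEC_PER_SEC stands in for the literal 1000000000 and the exact form may differ:

static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
{
	/* ticks = ns * freq / 1e9, rounded up */
	return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
				NSEC_PER_SEC);
}

static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
{
	/* ns = ticks * 1e9 / freq */
	return div_u64(val * NSEC_PER_SEC,
		       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
}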