Lines matching +full:render +full:- +full:max (code search over drivers/gpu/drm/i915/i915_debugfs.c; each hit shows the source line number, the matched line, and the enclosing function)
60 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_capabilities()
61 struct drm_device *dev = node->minor->dev; in i915_capabilities()
64 seq_printf(m, "gen: %d\n", info->gen); in i915_capabilities()
66 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) in i915_capabilities()
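The hits above come from the driver's capability dumper. A minimal sketch of the whole callback, reconstructed from the fragments; the yesno() helper and the particular feature flags fed to B() are assumptions (the real flag list is longer), and all sketches in this listing assume the driver's own headers (i915_drv.h and friends):

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	/* One "flag: yes/no" line per feature bit. */
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);		/* representative flags only */
	B(has_bsd_ring);
	B(has_blt_ring);
#undef B

	return 0;
}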
93 if (obj->user_pin_count > 0) in get_pin_flag()
95 else if (obj->pin_count > 0) in get_pin_flag()
103 switch (obj->tiling_mode) { in get_tiling_flag()
125 &obj->base, in describe_obj()
128 obj->base.size / 1024, in describe_obj()
129 obj->base.read_domains, in describe_obj()
130 obj->base.write_domain, in describe_obj()
131 obj->last_rendering_seqno, in describe_obj()
132 obj->last_fenced_seqno, in describe_obj()
133 cache_level_str(obj->cache_level), in describe_obj()
134 obj->dirty ? " dirty" : "", in describe_obj()
135 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); in describe_obj()
136 if (obj->base.name) in describe_obj()
137 seq_printf(m, " (name: %d)", obj->base.name); in describe_obj()
138 if (obj->fence_reg != I915_FENCE_REG_NONE) in describe_obj()
139 seq_printf(m, " (fence: %d)", obj->fence_reg); in describe_obj()
140 if (obj->gtt_space != NULL) in describe_obj()
142 obj->gtt_offset, (unsigned int)obj->gtt_space->size); in describe_obj()
143 if (obj->pin_mappable || obj->fault_mappable) { in describe_obj()
145 if (obj->pin_mappable) in describe_obj()
147 if (obj->fault_mappable) in describe_obj()
152 if (obj->ring != NULL) in describe_obj()
153 seq_printf(m, " (%s)", obj->ring->name); in describe_obj()
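The describe_obj() hits show only the argument list; the format string on the line before them is not in the match set. A sketch of the helper with a plausible format string (the exact string and field widths are assumptions inferred from the arguments):

static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),	/* "P" user-pinned, "p" kernel-pinned, " " */
		   get_tiling_flag(obj),	/* "X"/"Y" tiling, " " for none */
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_rendering_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}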
158 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_object_list_info()
159 uintptr_t list = (uintptr_t) node->info_ent->data; in i915_gem_object_list_info()
161 struct drm_device *dev = node->minor->dev; in i915_gem_object_list_info()
162 drm_i915_private_t *dev_priv = dev->dev_private; in i915_gem_object_list_info()
167 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gem_object_list_info()
174 head = &dev_priv->mm.active_list; in i915_gem_object_list_info()
178 head = &dev_priv->mm.inactive_list; in i915_gem_object_list_info()
182 head = &dev_priv->mm.pinned_list; in i915_gem_object_list_info()
186 head = &dev_priv->mm.flushing_list; in i915_gem_object_list_info()
190 head = &dev_priv->mm.deferred_free_list; in i915_gem_object_list_info()
193 mutex_unlock(&dev->struct_mutex); in i915_gem_object_list_info()
194 return -EINVAL; in i915_gem_object_list_info()
202 total_obj_size += obj->base.size; in i915_gem_object_list_info()
203 total_gtt_size += obj->gtt_space->size; in i915_gem_object_list_info()
206 mutex_unlock(&dev->struct_mutex); in i915_gem_object_list_info()
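i915_gem_object_list_info() is driven by the debugfs entry's ->data, which encodes which LRU to walk. A condensed sketch under the assumption that the list selectors (ACTIVE_LIST and friends) are a local enum, with the per-list headings omitted:

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size = 0, total_gtt_size = 0;
	struct list_head *head;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		/* Unknown selector: drop the lock before failing. */
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	list_for_each_entry(obj, head, mm_list) {
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}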
215 size += obj->gtt_space->size; \
217 if (obj->map_and_fenceable) { \
218 mappable_size += obj->gtt_space->size; \
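The three backslash-continued hits are the body of a counting macro. A reconstruction; the macro name count_objects appears in the hits below, while the captured counters (obj, size, count, mappable_size, mappable_count) are assumptions consistent with its use in i915_gem_object_info():

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)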
226 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_object_info()
227 struct drm_device *dev = node->minor->dev; in i915_gem_object_info()
228 struct drm_i915_private *dev_priv = dev->dev_private; in i915_gem_object_info()
234 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gem_object_info()
239 dev_priv->mm.object_count, in i915_gem_object_info()
240 dev_priv->mm.object_memory); in i915_gem_object_info()
243 count_objects(&dev_priv->mm.gtt_list, gtt_list); in i915_gem_object_info()
248 count_objects(&dev_priv->mm.active_list, mm_list); in i915_gem_object_info()
249 count_objects(&dev_priv->mm.flushing_list, mm_list); in i915_gem_object_info()
254 count_objects(&dev_priv->mm.pinned_list, mm_list); in i915_gem_object_info()
259 count_objects(&dev_priv->mm.inactive_list, mm_list); in i915_gem_object_info()
264 count_objects(&dev_priv->mm.deferred_free_list, mm_list); in i915_gem_object_info()
269 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { in i915_gem_object_info()
270 if (obj->fault_mappable) { in i915_gem_object_info()
271 size += obj->gtt_space->size; in i915_gem_object_info()
274 if (obj->pin_mappable) { in i915_gem_object_info()
275 mappable_size += obj->gtt_space->size; in i915_gem_object_info()
285 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); in i915_gem_object_info()
287 mutex_unlock(&dev->struct_mutex); in i915_gem_object_info()
294 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_gtt_info()
295 struct drm_device *dev = node->minor->dev; in i915_gem_gtt_info()
296 struct drm_i915_private *dev_priv = dev->dev_private; in i915_gem_gtt_info()
301 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gem_gtt_info()
306 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { in i915_gem_gtt_info()
310 total_obj_size += obj->base.size; in i915_gem_gtt_info()
311 total_gtt_size += obj->gtt_space->size; in i915_gem_gtt_info()
315 mutex_unlock(&dev->struct_mutex); in i915_gem_gtt_info()
326 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_pageflip_info()
327 struct drm_device *dev = node->minor->dev; in i915_gem_pageflip_info()
331 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { in i915_gem_pageflip_info()
332 const char pipe = pipe_name(crtc->pipe); in i915_gem_pageflip_info()
333 const char plane = plane_name(crtc->plane); in i915_gem_pageflip_info()
336 spin_lock_irqsave(&dev->event_lock, flags); in i915_gem_pageflip_info()
337 work = crtc->unpin_work; in i915_gem_pageflip_info()
342 if (!work->pending) { in i915_gem_pageflip_info()
349 if (work->enable_stall_check) in i915_gem_pageflip_info()
353 seq_printf(m, "%d prepares\n", work->pending); in i915_gem_pageflip_info()
355 if (work->old_fb_obj) { in i915_gem_pageflip_info()
356 struct drm_i915_gem_object *obj = work->old_fb_obj; in i915_gem_pageflip_info()
358 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); in i915_gem_pageflip_info()
360 if (work->pending_flip_obj) { in i915_gem_pageflip_info()
361 struct drm_i915_gem_object *obj = work->pending_flip_obj; in i915_gem_pageflip_info()
363 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); in i915_gem_pageflip_info()
366 spin_unlock_irqrestore(&dev->event_lock, flags); in i915_gem_pageflip_info()
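The page-flip dumper samples crtc->unpin_work under dev->event_lock so a concurrent flip completion cannot free the work item while it is being printed. A sketch of the per-CRTC body (the message strings are assumptions):

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending)
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			else
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj)
				seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n",
					   work->old_fb_obj->gtt_offset);
			if (work->pending_flip_obj)
				seq_printf(m, "New framebuffer gtt_offset 0x%08x\n",
					   work->pending_flip_obj->gtt_offset);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}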
374 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_request_info()
375 struct drm_device *dev = node->minor->dev; in i915_gem_request_info()
376 drm_i915_private_t *dev_priv = dev->dev_private; in i915_gem_request_info()
380 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gem_request_info()
385 if (!list_empty(&dev_priv->ring[RCS].request_list)) { in i915_gem_request_info()
386 seq_printf(m, "Render requests:\n"); in i915_gem_request_info()
388 &dev_priv->ring[RCS].request_list, in i915_gem_request_info()
391 gem_request->seqno, in i915_gem_request_info()
392 (int) (jiffies - gem_request->emitted_jiffies)); in i915_gem_request_info()
396 if (!list_empty(&dev_priv->ring[VCS].request_list)) { in i915_gem_request_info()
399 &dev_priv->ring[VCS].request_list, in i915_gem_request_info()
402 gem_request->seqno, in i915_gem_request_info()
403 (int) (jiffies - gem_request->emitted_jiffies)); in i915_gem_request_info()
407 if (!list_empty(&dev_priv->ring[BCS].request_list)) { in i915_gem_request_info()
410 &dev_priv->ring[BCS].request_list, in i915_gem_request_info()
413 gem_request->seqno, in i915_gem_request_info()
414 (int) (jiffies - gem_request->emitted_jiffies)); in i915_gem_request_info()
418 mutex_unlock(&dev->struct_mutex); in i915_gem_request_info()
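The three per-ring blocks in i915_gem_request_info() are identical apart from the ring index (RCS/VCS/BCS) and the heading. A hedged sketch of the obvious factoring; the helper name is hypothetical, not upstream code:

/* Hypothetical helper: dump the outstanding requests of one ring. */
static void i915_ring_request_info(struct seq_file *m,
				   struct intel_ring_buffer *ring,
				   const char *heading)
{
	struct drm_i915_gem_request *gem_request;

	if (list_empty(&ring->request_list))
		return;

	seq_printf(m, "%s requests:\n", heading);
	list_for_each_entry(gem_request, &ring->request_list, list) {
		seq_printf(m, "    %d @ %d\n",
			   gem_request->seqno,
			   (int) (jiffies - gem_request->emitted_jiffies));
	}
}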
429 if (ring->get_seqno) { in i915_ring_seqno_info()
431 ring->name, ring->get_seqno(ring)); in i915_ring_seqno_info()
433 ring->name, ring->waiting_seqno); in i915_ring_seqno_info()
435 ring->name, ring->irq_seqno); in i915_ring_seqno_info()
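i915_ring_seqno_info() is almost fully covered by the hits; only the format strings are missing. A reconstruction (label text is an assumption):

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring));
		seq_printf(m, "Waiting sequence (%s):  %d\n",
			   ring->name, ring->waiting_seqno);
		seq_printf(m, "IRQ sequence (%s):      %d\n",
			   ring->name, ring->irq_seqno);
	}
}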
441 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_seqno_info()
442 struct drm_device *dev = node->minor->dev; in i915_gem_seqno_info()
443 drm_i915_private_t *dev_priv = dev->dev_private; in i915_gem_seqno_info()
446 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gem_seqno_info()
451 i915_ring_seqno_info(m, &dev_priv->ring[i]); in i915_gem_seqno_info()
453 mutex_unlock(&dev->struct_mutex); in i915_gem_seqno_info()
461 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_interrupt_info()
462 struct drm_device *dev = node->minor->dev; in i915_interrupt_info()
463 drm_i915_private_t *dev_priv = dev->dev_private; in i915_interrupt_info()
466 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_interrupt_info()
502 atomic_read(&dev_priv->irq_received)); in i915_interrupt_info()
506 dev_priv->ring[i].name, in i915_interrupt_info()
507 I915_READ_IMR(&dev_priv->ring[i])); in i915_interrupt_info()
509 i915_ring_seqno_info(m, &dev_priv->ring[i]); in i915_interrupt_info()
511 mutex_unlock(&dev->struct_mutex); in i915_interrupt_info()
518 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_fence_regs_info()
519 struct drm_device *dev = node->minor->dev; in i915_gem_fence_regs_info()
520 drm_i915_private_t *dev_priv = dev->dev_private; in i915_gem_fence_regs_info()
523 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gem_fence_regs_info()
527 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); in i915_gem_fence_regs_info()
528 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); in i915_gem_fence_regs_info()
529 for (i = 0; i < dev_priv->num_fence_regs; i++) { in i915_gem_fence_regs_info()
530 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; in i915_gem_fence_regs_info()
540 mutex_unlock(&dev->struct_mutex); in i915_gem_fence_regs_info()
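The fence-register loop prints one line per register, delegating to describe_obj() when a buffer currently owns the fence. A sketch of the loop body (the label text is an assumption):

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}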
546 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_hws_info()
547 struct drm_device *dev = node->minor->dev; in i915_hws_info()
548 drm_i915_private_t *dev_priv = dev->dev_private; in i915_hws_info()
553 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; in i915_hws_info()
554 hws = (volatile u32 __iomem *)ring->status_page.page_addr; in i915_hws_info()
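The hardware status page is one page of u32 slots; after the cast above, the dumper walks it four dwords per output line. A simplified sketch that dumps the full 4096-byte page (the upstream loop bound may differ):

	for (i = 0; i < 4096 / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);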
572 page_count = obj->base.size / PAGE_SIZE; in i915_dump_object()
575 obj->gtt_offset + page * PAGE_SIZE); in i915_dump_object()
584 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_batchbuffer_info()
585 struct drm_device *dev = node->minor->dev; in i915_batchbuffer_info()
586 drm_i915_private_t *dev_priv = dev->dev_private; in i915_batchbuffer_info()
590 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_batchbuffer_info()
594 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { in i915_batchbuffer_info()
595 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { in i915_batchbuffer_info()
596 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); in i915_batchbuffer_info()
597 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); in i915_batchbuffer_info()
601 mutex_unlock(&dev->struct_mutex); in i915_batchbuffer_info()
607 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_ringbuffer_data()
608 struct drm_device *dev = node->minor->dev; in i915_ringbuffer_data()
609 drm_i915_private_t *dev_priv = dev->dev_private; in i915_ringbuffer_data()
613 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_ringbuffer_data()
617 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; in i915_ringbuffer_data()
618 if (!ring->obj) { in i915_ringbuffer_data()
621 const u8 __iomem *virt = ring->virtual_start; in i915_ringbuffer_data()
624 for (off = 0; off < ring->size; off += 4) { in i915_ringbuffer_data()
629 mutex_unlock(&dev->struct_mutex); in i915_ringbuffer_data()
636 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_ringbuffer_info()
637 struct drm_device *dev = node->minor->dev; in i915_ringbuffer_info()
638 drm_i915_private_t *dev_priv = dev->dev_private; in i915_ringbuffer_info()
642 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; in i915_ringbuffer_info()
643 if (ring->size == 0) in i915_ringbuffer_info()
646 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_ringbuffer_info()
650 seq_printf(m, "Ring %s:\n", ring->name); in i915_ringbuffer_info()
653 seq_printf(m, " Size : %08x\n", ring->size); in i915_ringbuffer_info()
663 mutex_unlock(&dev->struct_mutex); in i915_ringbuffer_info()
671 case RING_RENDER: return " render"; in ring_str()
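Only the RING_RENDER arm of ring_str() is in the match set; a reconstruction, with the bsd/blt arms assumed by analogy with the rings listed elsewhere in this file:

static const char *ring_str(int ring)
{
	switch (ring) {
	case RING_RENDER: return " render";
	case RING_BSD:    return " bsd";
	case RING_BLT:    return " blt";
	default:          return "";
	}
}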
715 while (count--) { in print_error_buffers()
717 err->gtt_offset, in print_error_buffers()
718 err->size, in print_error_buffers()
719 err->read_domains, in print_error_buffers()
720 err->write_domain, in print_error_buffers()
721 err->seqno, in print_error_buffers()
722 pin_flag(err->pinned), in print_error_buffers()
723 tiling_flag(err->tiling), in print_error_buffers()
724 dirty_flag(err->dirty), in print_error_buffers()
725 purgeable_flag(err->purgeable), in print_error_buffers()
726 ring_str(err->ring), in print_error_buffers()
727 cache_level_str(err->cache_level)); in print_error_buffers()
729 if (err->name) in print_error_buffers()
730 seq_printf(m, " (name: %d)", err->name); in print_error_buffers()
731 if (err->fence_reg != I915_FENCE_REG_NONE) in print_error_buffers()
732 seq_printf(m, " (fence: %d)", err->fence_reg); in print_error_buffers()
741 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_error_state()
742 struct drm_device *dev = node->minor->dev; in i915_error_state()
743 drm_i915_private_t *dev_priv = dev->dev_private; in i915_error_state()
748 spin_lock_irqsave(&dev_priv->error_lock, flags); in i915_error_state()
749 if (!dev_priv->first_error) { in i915_error_state()
754 error = dev_priv->first_error; in i915_error_state()
756 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, in i915_error_state()
757 error->time.tv_usec); in i915_error_state()
758 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); in i915_error_state()
759 seq_printf(m, "EIR: 0x%08x\n", error->eir); in i915_error_state()
760 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); in i915_error_state()
761 if (INTEL_INFO(dev)->gen >= 6) { in i915_error_state()
762 seq_printf(m, "ERROR: 0x%08x\n", error->error); in i915_error_state()
764 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); in i915_error_state()
765 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); in i915_error_state()
766 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); in i915_error_state()
767 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); in i915_error_state()
768 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); in i915_error_state()
770 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); in i915_error_state()
771 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); in i915_error_state()
772 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); in i915_error_state()
773 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); in i915_error_state()
774 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); in i915_error_state()
776 seq_printf(m, "Render command stream:\n"); in i915_error_state()
777 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); in i915_error_state()
778 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); in i915_error_state()
779 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); in i915_error_state()
780 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); in i915_error_state()
781 if (INTEL_INFO(dev)->gen >= 4) { in i915_error_state()
782 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); in i915_error_state()
783 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); in i915_error_state()
785 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); in i915_error_state()
786 seq_printf(m, " seqno: 0x%08x\n", error->seqno); in i915_error_state()
788 for (i = 0; i < dev_priv->num_fence_regs; i++) in i915_error_state()
789 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); in i915_error_state()
791 if (error->active_bo) in i915_error_state()
793 error->active_bo, in i915_error_state()
794 error->active_bo_count); in i915_error_state()
796 if (error->pinned_bo) in i915_error_state()
798 error->pinned_bo, in i915_error_state()
799 error->pinned_bo_count); in i915_error_state()
801 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { in i915_error_state()
802 if (error->batchbuffer[i]) { in i915_error_state()
803 struct drm_i915_error_object *obj = error->batchbuffer[i]; in i915_error_state()
805 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", in i915_error_state()
806 dev_priv->ring[i].name, in i915_error_state()
807 obj->gtt_offset); in i915_error_state()
809 for (page = 0; page < obj->page_count; page++) { in i915_error_state()
811 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); in i915_error_state()
818 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { in i915_error_state()
819 if (error->ringbuffer[i]) { in i915_error_state()
820 struct drm_i915_error_object *obj = error->ringbuffer[i]; in i915_error_state()
821 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", in i915_error_state()
822 dev_priv->ring[i].name, in i915_error_state()
823 obj->gtt_offset); in i915_error_state()
825 for (page = 0; page < obj->page_count; page++) { in i915_error_state()
829 obj->pages[page][elt]); in i915_error_state()
836 if (error->overlay) in i915_error_state()
837 intel_overlay_print_error_state(m, error->overlay); in i915_error_state()
839 if (error->display) in i915_error_state()
840 intel_display_print_error_state(m, dev, error->display); in i915_error_state()
843 spin_unlock_irqrestore(&dev_priv->error_lock, flags); in i915_error_state()
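i915_error_state() replays a snapshot captured at hang time, protected by error_lock for the whole dump. The batchbuffer section prints the saved pages one dword per line; a sketch of that inner loop, matching the fragments (the loop variable names are assumptions):

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		struct drm_i915_error_object *obj = error->batchbuffer[i];
		int page, elt, offset;

		if (obj == NULL)
			continue;

		seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
			   dev_priv->ring[i].name, obj->gtt_offset);

		offset = 0;
		for (page = 0; page < obj->page_count; page++) {
			for (elt = 0; elt < PAGE_SIZE / 4; elt++) {
				seq_printf(m, "%08x :  %08x\n",
					   offset, obj->pages[page][elt]);
				offset += 4;
			}
		}
	}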
850 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_rstdby_delays()
851 struct drm_device *dev = node->minor->dev; in i915_rstdby_delays()
852 drm_i915_private_t *dev_priv = dev->dev_private; in i915_rstdby_delays()
856 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_rstdby_delays()
862 mutex_unlock(&dev->struct_mutex); in i915_rstdby_delays()
871 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_cur_delayinfo()
872 struct drm_device *dev = node->minor->dev; in i915_cur_delayinfo()
873 drm_i915_private_t *dev_priv = dev->dev_private; in i915_cur_delayinfo()
880 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); in i915_cur_delayinfo()
884 seq_printf(m, "Current P-state: %d\n", in i915_cur_delayinfo()
896 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_cur_delayinfo()
911 mutex_unlock(&dev->struct_mutex); in i915_cur_delayinfo()
915 seq_printf(m, "Render p-state ratio: %d\n", in i915_cur_delayinfo()
917 seq_printf(m, "Render p-state VID: %d\n", in i915_cur_delayinfo()
919 seq_printf(m, "Render p-state limit: %d\n", in i915_cur_delayinfo()
945 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", in i915_cur_delayinfo()
948 seq_printf(m, "no P-state info available\n"); in i915_cur_delayinfo()
956 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_delayfreq_table()
957 struct drm_device *dev = node->minor->dev; in i915_delayfreq_table()
958 drm_i915_private_t *dev_priv = dev->dev_private; in i915_delayfreq_table()
962 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_delayfreq_table()
972 mutex_unlock(&dev->struct_mutex); in i915_delayfreq_table()
979 return 1250 - (map * 25); in MAP_TO_MV()
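MAP_TO_MV() is fully determined by its single hit; the interpretation as a voltage-ID decode follows from the name and the 25 mV step constant:

static inline int MAP_TO_MV(int map)
{
	/* VID encoding: map 0 is 1250 mV, each step down is 25 mV. */
	return 1250 - (map * 25);
}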
984 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_inttoext_table()
985 struct drm_device *dev = node->minor->dev; in i915_inttoext_table()
986 drm_i915_private_t *dev_priv = dev->dev_private; in i915_inttoext_table()
990 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_inttoext_table()
999 mutex_unlock(&dev->struct_mutex); in i915_inttoext_table()
1006 struct drm_info_node *node = (struct drm_info_node *) m->private; in ironlake_drpc_info()
1007 struct drm_device *dev = node->minor->dev; in ironlake_drpc_info()
1008 drm_i915_private_t *dev_priv = dev->dev_private; in ironlake_drpc_info()
1013 ret = mutex_lock_interruptible(&dev->struct_mutex); in ironlake_drpc_info()
1021 mutex_unlock(&dev->struct_mutex); in ironlake_drpc_info()
1036 seq_printf(m, "Max P-state: P%d\n", in ironlake_drpc_info()
1038 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); in ironlake_drpc_info()
1041 seq_printf(m, "Render standby enabled: %s\n", in ironlake_drpc_info()
1074 struct drm_info_node *node = (struct drm_info_node *) m->private; in gen6_drpc_info()
1075 struct drm_device *dev = node->minor->dev; in gen6_drpc_info()
1076 struct drm_i915_private *dev_priv = dev->dev_private; in gen6_drpc_info()
1082 ret = mutex_lock_interruptible(&dev->struct_mutex); in gen6_drpc_info()
1086 spin_lock_irq(&dev_priv->gt_lock); in gen6_drpc_info()
1087 forcewake_count = dev_priv->forcewake_count; in gen6_drpc_info()
1088 spin_unlock_irq(&dev_priv->gt_lock); in gen6_drpc_info()
1100 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); in gen6_drpc_info()
1105 mutex_unlock(&dev->struct_mutex); in gen6_drpc_info()
1151 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_drpc_info()
1152 struct drm_device *dev = node->minor->dev; in i915_drpc_info()
1162 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_fbc_status()
1163 struct drm_device *dev = node->minor->dev; in i915_fbc_status()
1164 drm_i915_private_t *dev_priv = dev->dev_private; in i915_fbc_status()
1175 switch (dev_priv->no_fbc_reason) { in i915_fbc_status()
1210 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_sr_status()
1211 struct drm_device *dev = node->minor->dev; in i915_sr_status()
1212 drm_i915_private_t *dev_priv = dev->dev_private; in i915_sr_status()
1224 seq_printf(m, "self-refresh: %s\n", in i915_sr_status()
1232 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_emon_status()
1233 struct drm_device *dev = node->minor->dev; in i915_emon_status()
1234 drm_i915_private_t *dev_priv = dev->dev_private; in i915_emon_status()
1238 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_emon_status()
1245 mutex_unlock(&dev->struct_mutex); in i915_emon_status()
1257 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_ring_freq_table()
1258 struct drm_device *dev = node->minor->dev; in i915_ring_freq_table()
1259 drm_i915_private_t *dev_priv = dev->dev_private; in i915_ring_freq_table()
1268 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_ring_freq_table()
1274 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; in i915_ring_freq_table()
1288 mutex_unlock(&dev->struct_mutex); in i915_ring_freq_table()
1295 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gfxec()
1296 struct drm_device *dev = node->minor->dev; in i915_gfxec()
1297 drm_i915_private_t *dev_priv = dev->dev_private; in i915_gfxec()
1300 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_gfxec()
1306 mutex_unlock(&dev->struct_mutex); in i915_gfxec()
1313 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_opregion()
1314 struct drm_device *dev = node->minor->dev; in i915_opregion()
1315 drm_i915_private_t *dev_priv = dev->dev_private; in i915_opregion()
1316 struct intel_opregion *opregion = &dev_priv->opregion; in i915_opregion()
1319 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_opregion()
1323 if (opregion->header) in i915_opregion()
1324 seq_write(m, opregion->header, OPREGION_SIZE); in i915_opregion()
1326 mutex_unlock(&dev->struct_mutex); in i915_opregion()
1333 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gem_framebuffer_info()
1334 struct drm_device *dev = node->minor->dev; in i915_gem_framebuffer_info()
1335 drm_i915_private_t *dev_priv = dev->dev_private; in i915_gem_framebuffer_info()
1340 ret = mutex_lock_interruptible(&dev->mode_config.mutex); in i915_gem_framebuffer_info()
1344 ifbdev = dev_priv->fbdev; in i915_gem_framebuffer_info()
1345 fb = to_intel_framebuffer(ifbdev->helper.fb); in i915_gem_framebuffer_info()
1348 fb->base.width, in i915_gem_framebuffer_info()
1349 fb->base.height, in i915_gem_framebuffer_info()
1350 fb->base.depth, in i915_gem_framebuffer_info()
1351 fb->base.bits_per_pixel); in i915_gem_framebuffer_info()
1352 describe_obj(m, fb->obj); in i915_gem_framebuffer_info()
1355 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { in i915_gem_framebuffer_info()
1356 if (&fb->base == ifbdev->helper.fb) in i915_gem_framebuffer_info()
1360 fb->base.width, in i915_gem_framebuffer_info()
1361 fb->base.height, in i915_gem_framebuffer_info()
1362 fb->base.depth, in i915_gem_framebuffer_info()
1363 fb->base.bits_per_pixel); in i915_gem_framebuffer_info()
1364 describe_obj(m, fb->obj); in i915_gem_framebuffer_info()
1368 mutex_unlock(&dev->mode_config.mutex); in i915_gem_framebuffer_info()
1375 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_context_status()
1376 struct drm_device *dev = node->minor->dev; in i915_context_status()
1377 drm_i915_private_t *dev_priv = dev->dev_private; in i915_context_status()
1380 ret = mutex_lock_interruptible(&dev->mode_config.mutex); in i915_context_status()
1384 if (dev_priv->pwrctx) { in i915_context_status()
1386 describe_obj(m, dev_priv->pwrctx); in i915_context_status()
1390 if (dev_priv->renderctx) { in i915_context_status()
1391 seq_printf(m, "render context "); in i915_context_status()
1392 describe_obj(m, dev_priv->renderctx); in i915_context_status()
1396 mutex_unlock(&dev->mode_config.mutex); in i915_context_status()
1403 struct drm_info_node *node = (struct drm_info_node *) m->private; in i915_gen6_forcewake_count_info()
1404 struct drm_device *dev = node->minor->dev; in i915_gen6_forcewake_count_info()
1405 struct drm_i915_private *dev_priv = dev->dev_private; in i915_gen6_forcewake_count_info()
1408 spin_lock_irq(&dev_priv->gt_lock); in i915_gen6_forcewake_count_info()
1409 forcewake_count = dev_priv->forcewake_count; in i915_gen6_forcewake_count_info()
1410 spin_unlock_irq(&dev_priv->gt_lock); in i915_gen6_forcewake_count_info()
1421 filp->private_data = inode->i_private; in i915_wedged_open()
1428 size_t max, in i915_wedged_read() argument
1431 struct drm_device *dev = filp->private_data; in i915_wedged_read()
1432 drm_i915_private_t *dev_priv = dev->dev_private; in i915_wedged_read()
1438 atomic_read(&dev_priv->mm.wedged)); in i915_wedged_read()
1443 return simple_read_from_buffer(ubuf, max, ppos, buf, len); in i915_wedged_read()
1452 struct drm_device *dev = filp->private_data; in i915_wedged_write()
1457 if (cnt > sizeof(buf) - 1) in i915_wedged_write()
1458 return -EINVAL; in i915_wedged_write()
1461 return -EFAULT; in i915_wedged_write()
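i915_wedged is one of the hand-rolled debugfs files: open stashes inode->i_private in filp->private_data, read formats into a stack buffer and hands it to simple_read_from_buffer(), and write parses a user-supplied integer. A sketch of the write side (buffer size, log text, and the call into the hang handler are assumptions consistent with the fragments):

static ssize_t
i915_wedged_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)	/* leave room for the NUL */
			return -EINVAL;
		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);	/* assumption: kick the hang handler */

	return cnt;
}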
1485 filp->private_data = inode->i_private; in i915_max_freq_open()
1492 size_t max, in i915_max_freq_read() argument
1495 struct drm_device *dev = filp->private_data; in i915_max_freq_read()
1496 drm_i915_private_t *dev_priv = dev->dev_private; in i915_max_freq_read()
1501 "max freq: %d\n", dev_priv->max_delay * 50); in i915_max_freq_read()
1506 return simple_read_from_buffer(ubuf, max, ppos, buf, len); in i915_max_freq_read()
1515 struct drm_device *dev = filp->private_data; in i915_max_freq_write()
1516 struct drm_i915_private *dev_priv = dev->dev_private; in i915_max_freq_write()
1521 if (cnt > sizeof(buf) - 1) in i915_max_freq_write()
1522 return -EINVAL; in i915_max_freq_write()
1525 return -EFAULT; in i915_max_freq_write()
1531 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); in i915_max_freq_write()
1536 dev_priv->max_delay = val / 50; in i915_max_freq_write()
1555 filp->private_data = inode->i_private; in i915_cache_sharing_open()
1562 size_t max, in i915_cache_sharing_read() argument
1565 struct drm_device *dev = filp->private_data; in i915_cache_sharing_read()
1566 drm_i915_private_t *dev_priv = dev->dev_private; in i915_cache_sharing_read()
1571 mutex_lock(&dev_priv->dev->struct_mutex); in i915_cache_sharing_read()
1573 mutex_unlock(&dev_priv->dev->struct_mutex); in i915_cache_sharing_read()
1582 return simple_read_from_buffer(ubuf, max, ppos, buf, len); in i915_cache_sharing_read()
1591 struct drm_device *dev = filp->private_data; in i915_cache_sharing_write()
1592 struct drm_i915_private *dev_priv = dev->dev_private; in i915_cache_sharing_write()
1598 if (cnt > sizeof(buf) - 1) in i915_cache_sharing_write()
1599 return -EINVAL; in i915_cache_sharing_write()
1602 return -EFAULT; in i915_cache_sharing_write()
1609 return -EINVAL; in i915_cache_sharing_write()
1630 /* As the drm_debugfs_init() routines are called before dev->dev_private is
1642 return -ENOMEM; in drm_add_fake_info_node()
1645 node->minor = minor; in drm_add_fake_info_node()
1646 node->dent = ent; in drm_add_fake_info_node()
1647 node->info_ent = (void *) key; in drm_add_fake_info_node()
1649 mutex_lock(&minor->debugfs_lock); in drm_add_fake_info_node()
1650 list_add(&node->list, &minor->debugfs_list); in drm_add_fake_info_node()
1651 mutex_unlock(&minor->debugfs_lock); in drm_add_fake_info_node()
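drm_add_fake_info_node() wraps a manually created debugfs dentry in a drm_info_node so the common DRM teardown path can find and remove it (the comment at line 1630 explains why: drm_debugfs_init() runs before dev->dev_private exists). A reconstruction, with the kmalloc flags assumed:

static int drm_add_fake_info_node(struct drm_minor *minor,
				  struct dentry *ent,
				  const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;	/* the fops pointer doubles as lookup key */

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}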
1658 struct drm_device *dev = minor->dev; in i915_wedged_create()
1673 struct drm_device *dev = inode->i_private; in i915_forcewake_open()
1674 struct drm_i915_private *dev_priv = dev->dev_private; in i915_forcewake_open()
1677 if (INTEL_INFO(dev)->gen < 6) in i915_forcewake_open()
1680 ret = mutex_lock_interruptible(&dev->struct_mutex); in i915_forcewake_open()
1684 mutex_unlock(&dev->struct_mutex); in i915_forcewake_open()
1691 struct drm_device *dev = inode->i_private; in i915_forcewake_release()
1692 struct drm_i915_private *dev_priv = dev->dev_private; in i915_forcewake_release()
1694 if (INTEL_INFO(dev)->gen < 6) in i915_forcewake_release()
1700 * almost certainly the driver has hung, is not unload-able. Therefore in i915_forcewake_release()
1704 mutex_lock(&dev->struct_mutex); in i915_forcewake_release()
1706 mutex_unlock(&dev->struct_mutex); in i915_forcewake_release()
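The forcewake file pins the GT awake for as long as userspace holds the fd: open takes a forcewake reference, release drops it (unconditionally taking struct_mutex there, per the comment at line 1700, because a hung driver is the likely reason the lock is contended). A sketch of the open side; gen6_gt_force_wake_get() is this era's reference-taking call, and pre-gen6 hardware has no forcewake, hence the early return:

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;	/* no forcewake before gen6 */

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}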
1719 struct drm_device *dev = minor->dev; in i915_forcewake_create()
1734 struct drm_device *dev = minor->dev; in i915_max_freq_create()
1749 struct drm_device *dev = minor->dev; in i915_cache_sharing_create()
1808 ret = i915_wedged_create(minor->debugfs_root, minor); in i915_debugfs_init()
1812 ret = i915_forcewake_create(minor->debugfs_root, minor); in i915_debugfs_init()
1815 ret = i915_max_freq_create(minor->debugfs_root, minor); in i915_debugfs_init()
1818 ret = i915_cache_sharing_create(minor->debugfs_root, minor); in i915_debugfs_init()
1824 minor->debugfs_root, minor); in i915_debugfs_init()
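Each custom file is registered through a small *_create() helper and linked onto the minor's fake-node list via drm_add_fake_info_node() so teardown can find it. A sketch of the wedged variant (file name and mode bits are assumptions):

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_wedged",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_wedged_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

i915_debugfs_init() chains the four such helpers (wedged, forcewake, max_freq, cache_sharing) and then registers the static seq_file entries; the dangling "minor->debugfs_root, minor" at line 1824 is the tail of that final drm_debugfs_create_files() call.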