/*
 * drivers/gpu/drm/i915/i915_debugfs.c
 * (retrieved via the ~andy/linux gitweb mirror; merge of the drm-fixes branch)
 */
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
38
39 #define DRM_I915_RING_DEBUG 1
40
41
42 #if defined(CONFIG_DEBUG_FS)
43
/*
 * Selector values passed through drm_info_list.data to the object-list
 * debugfs handlers to choose which GEM object list to dump.
 */
enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
49
/* Map a boolean-ish integer to "yes" (non-zero) or "no" (zero). */
static const char *yesno(int v)
{
	if (v)
		return "yes";
	return "no";
}
54
/*
 * debugfs: dump the device generation, PCH type and one "flag: yes/no"
 * line for every boolean capability in struct intel_device_info.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
	/* DEV_INFO_FOR_EACH_FLAG expands PRINT_FLAG once per flag; the
	 * separator macro supplies the ';' between generated statements. */
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
71
72 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
73 {
74         if (obj->user_pin_count > 0)
75                 return "P";
76         else if (obj->pin_count > 0)
77                 return "p";
78         else
79                 return " ";
80 }
81
82 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
83 {
84         switch (obj->tiling_mode) {
85         default:
86         case I915_TILING_NONE: return " ";
87         case I915_TILING_X: return "X";
88         case I915_TILING_Y: return "Y";
89         }
90 }
91
/*
 * Print a one-line summary of a GEM object: kernel pointer, pin/tiling
 * flags, size, read/write domains, last read/write/fence seqnos, cache
 * level and dirty/purgeable state, followed by optional annotations
 * (flink name, pin count, fence register, GTT binding, stolen offset,
 * mappability, owning ring).  No trailing newline is emitted.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (i915_gem_obj_ggtt_bound(obj))
		seq_printf(m, " (gtt offset: %08lx, size: %08x)",
			   i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		/* Build a "p"/"f"/"pf" tag; s[3] holds both flags + NUL. */
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
131
132 static int i915_gem_object_list_info(struct seq_file *m, void *data)
133 {
134         struct drm_info_node *node = (struct drm_info_node *) m->private;
135         uintptr_t list = (uintptr_t) node->info_ent->data;
136         struct list_head *head;
137         struct drm_device *dev = node->minor->dev;
138         struct drm_i915_private *dev_priv = dev->dev_private;
139         struct i915_address_space *vm = &dev_priv->gtt.base;
140         struct drm_i915_gem_object *obj;
141         size_t total_obj_size, total_gtt_size;
142         int count, ret;
143
144         ret = mutex_lock_interruptible(&dev->struct_mutex);
145         if (ret)
146                 return ret;
147
148         switch (list) {
149         case ACTIVE_LIST:
150                 seq_puts(m, "Active:\n");
151                 head = &vm->active_list;
152                 break;
153         case INACTIVE_LIST:
154                 seq_puts(m, "Inactive:\n");
155                 head = &vm->inactive_list;
156                 break;
157         default:
158                 mutex_unlock(&dev->struct_mutex);
159                 return -EINVAL;
160         }
161
162         total_obj_size = total_gtt_size = count = 0;
163         list_for_each_entry(obj, head, mm_list) {
164                 seq_puts(m, "   ");
165                 describe_obj(m, obj);
166                 seq_putc(m, '\n');
167                 total_obj_size += obj->base.size;
168                 total_gtt_size += i915_gem_obj_ggtt_size(obj);
169                 count++;
170         }
171         mutex_unlock(&dev->struct_mutex);
172
173         seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
174                    count, total_obj_size, total_gtt_size);
175         return 0;
176 }
177
/*
 * Walk @list (linked through @member), accumulating each object's GGTT
 * size and count into the caller's local variables `size`/`count`, and
 * additionally into `mappable_size`/`mappable_count` for objects that
 * are map-and-fenceable.  NOTE: deliberately captures `obj` and those
 * four accumulators from the caller's scope.
 */
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
188
/* Per-client GEM usage totals accumulated by per_file_stats(). */
struct file_stats {
	int count;	/* number of objects in the client's handle idr */
	size_t total, active, inactive, unbound;	/* bytes per state */
};
193
194 static int per_file_stats(int id, void *ptr, void *data)
195 {
196         struct drm_i915_gem_object *obj = ptr;
197         struct file_stats *stats = data;
198
199         stats->count++;
200         stats->total += obj->base.size;
201
202         if (i915_gem_obj_ggtt_bound(obj)) {
203                 if (!list_empty(&obj->ring_list))
204                         stats->active += obj->base.size;
205                 else
206                         stats->inactive += obj->base.size;
207         } else {
208                 if (!list_empty(&obj->global_list))
209                         stats->unbound += obj->base.size;
210         }
211
212         return 0;
213 }
214
215 static int i915_gem_object_info(struct seq_file *m, void *data)
216 {
217         struct drm_info_node *node = (struct drm_info_node *) m->private;
218         struct drm_device *dev = node->minor->dev;
219         struct drm_i915_private *dev_priv = dev->dev_private;
220         u32 count, mappable_count, purgeable_count;
221         size_t size, mappable_size, purgeable_size;
222         struct drm_i915_gem_object *obj;
223         struct i915_address_space *vm = &dev_priv->gtt.base;
224         struct drm_file *file;
225         int ret;
226
227         ret = mutex_lock_interruptible(&dev->struct_mutex);
228         if (ret)
229                 return ret;
230
231         seq_printf(m, "%u objects, %zu bytes\n",
232                    dev_priv->mm.object_count,
233                    dev_priv->mm.object_memory);
234
235         size = count = mappable_size = mappable_count = 0;
236         count_objects(&dev_priv->mm.bound_list, global_list);
237         seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
238                    count, mappable_count, size, mappable_size);
239
240         size = count = mappable_size = mappable_count = 0;
241         count_objects(&vm->active_list, mm_list);
242         seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
243                    count, mappable_count, size, mappable_size);
244
245         size = count = mappable_size = mappable_count = 0;
246         count_objects(&vm->inactive_list, mm_list);
247         seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
248                    count, mappable_count, size, mappable_size);
249
250         size = count = purgeable_size = purgeable_count = 0;
251         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
252                 size += obj->base.size, ++count;
253                 if (obj->madv == I915_MADV_DONTNEED)
254                         purgeable_size += obj->base.size, ++purgeable_count;
255         }
256         seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
257
258         size = count = mappable_size = mappable_count = 0;
259         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
260                 if (obj->fault_mappable) {
261                         size += i915_gem_obj_ggtt_size(obj);
262                         ++count;
263                 }
264                 if (obj->pin_mappable) {
265                         mappable_size += i915_gem_obj_ggtt_size(obj);
266                         ++mappable_count;
267                 }
268                 if (obj->madv == I915_MADV_DONTNEED) {
269                         purgeable_size += obj->base.size;
270                         ++purgeable_count;
271                 }
272         }
273         seq_printf(m, "%u purgeable objects, %zu bytes\n",
274                    purgeable_count, purgeable_size);
275         seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
276                    mappable_count, mappable_size);
277         seq_printf(m, "%u fault mappable objects, %zu bytes\n",
278                    count, size);
279
280         seq_printf(m, "%zu [%lu] gtt total\n",
281                    dev_priv->gtt.base.total,
282                    dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
283
284         seq_putc(m, '\n');
285         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
286                 struct file_stats stats;
287
288                 memset(&stats, 0, sizeof(stats));
289                 idr_for_each(&file->object_idr, per_file_stats, &stats);
290                 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
291                            get_pid_task(file->pid, PIDTYPE_PID)->comm,
292                            stats.count,
293                            stats.total,
294                            stats.active,
295                            stats.inactive,
296                            stats.unbound);
297         }
298
299         mutex_unlock(&dev->struct_mutex);
300
301         return 0;
302 }
303
304 static int i915_gem_gtt_info(struct seq_file *m, void *data)
305 {
306         struct drm_info_node *node = (struct drm_info_node *) m->private;
307         struct drm_device *dev = node->minor->dev;
308         uintptr_t list = (uintptr_t) node->info_ent->data;
309         struct drm_i915_private *dev_priv = dev->dev_private;
310         struct drm_i915_gem_object *obj;
311         size_t total_obj_size, total_gtt_size;
312         int count, ret;
313
314         ret = mutex_lock_interruptible(&dev->struct_mutex);
315         if (ret)
316                 return ret;
317
318         total_obj_size = total_gtt_size = count = 0;
319         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
320                 if (list == PINNED_LIST && obj->pin_count == 0)
321                         continue;
322
323                 seq_puts(m, "   ");
324                 describe_obj(m, obj);
325                 seq_putc(m, '\n');
326                 total_obj_size += obj->base.size;
327                 total_gtt_size += i915_gem_obj_ggtt_size(obj);
328                 count++;
329         }
330
331         mutex_unlock(&dev->struct_mutex);
332
333         seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
334                    count, total_obj_size, total_gtt_size);
335
336         return 0;
337 }
338
339 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
340 {
341         struct drm_info_node *node = (struct drm_info_node *) m->private;
342         struct drm_device *dev = node->minor->dev;
343         unsigned long flags;
344         struct intel_crtc *crtc;
345
346         list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
347                 const char pipe = pipe_name(crtc->pipe);
348                 const char plane = plane_name(crtc->plane);
349                 struct intel_unpin_work *work;
350
351                 spin_lock_irqsave(&dev->event_lock, flags);
352                 work = crtc->unpin_work;
353                 if (work == NULL) {
354                         seq_printf(m, "No flip due on pipe %c (plane %c)\n",
355                                    pipe, plane);
356                 } else {
357                         if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
358                                 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
359                                            pipe, plane);
360                         } else {
361                                 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
362                                            pipe, plane);
363                         }
364                         if (work->enable_stall_check)
365                                 seq_puts(m, "Stall check enabled, ");
366                         else
367                                 seq_puts(m, "Stall check waiting for page flip ioctl, ");
368                         seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
369
370                         if (work->old_fb_obj) {
371                                 struct drm_i915_gem_object *obj = work->old_fb_obj;
372                                 if (obj)
373                                         seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
374                                                    i915_gem_obj_ggtt_offset(obj));
375                         }
376                         if (work->pending_flip_obj) {
377                                 struct drm_i915_gem_object *obj = work->pending_flip_obj;
378                                 if (obj)
379                                         seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
380                                                    i915_gem_obj_ggtt_offset(obj));
381                         }
382                 }
383                 spin_unlock_irqrestore(&dev->event_lock, flags);
384         }
385
386         return 0;
387 }
388
389 static int i915_gem_request_info(struct seq_file *m, void *data)
390 {
391         struct drm_info_node *node = (struct drm_info_node *) m->private;
392         struct drm_device *dev = node->minor->dev;
393         drm_i915_private_t *dev_priv = dev->dev_private;
394         struct intel_ring_buffer *ring;
395         struct drm_i915_gem_request *gem_request;
396         int ret, count, i;
397
398         ret = mutex_lock_interruptible(&dev->struct_mutex);
399         if (ret)
400                 return ret;
401
402         count = 0;
403         for_each_ring(ring, dev_priv, i) {
404                 if (list_empty(&ring->request_list))
405                         continue;
406
407                 seq_printf(m, "%s requests:\n", ring->name);
408                 list_for_each_entry(gem_request,
409                                     &ring->request_list,
410                                     list) {
411                         seq_printf(m, "    %d @ %d\n",
412                                    gem_request->seqno,
413                                    (int) (jiffies - gem_request->emitted_jiffies));
414                 }
415                 count++;
416         }
417         mutex_unlock(&dev->struct_mutex);
418
419         if (count == 0)
420                 seq_puts(m, "No requests\n");
421
422         return 0;
423 }
424
425 static void i915_ring_seqno_info(struct seq_file *m,
426                                  struct intel_ring_buffer *ring)
427 {
428         if (ring->get_seqno) {
429                 seq_printf(m, "Current sequence (%s): %u\n",
430                            ring->name, ring->get_seqno(ring, false));
431         }
432 }
433
434 static int i915_gem_seqno_info(struct seq_file *m, void *data)
435 {
436         struct drm_info_node *node = (struct drm_info_node *) m->private;
437         struct drm_device *dev = node->minor->dev;
438         drm_i915_private_t *dev_priv = dev->dev_private;
439         struct intel_ring_buffer *ring;
440         int ret, i;
441
442         ret = mutex_lock_interruptible(&dev->struct_mutex);
443         if (ret)
444                 return ret;
445
446         for_each_ring(ring, dev_priv, i)
447                 i915_ring_seqno_info(m, ring);
448
449         mutex_unlock(&dev->struct_mutex);
450
451         return 0;
452 }
453
454
/*
 * debugfs: dump the interrupt-related registers.  The register set
 * depends on the platform: Valleyview display/master/GT/PM registers,
 * pre-PCH-split (GMCH) IER/IIR/IMR + pipe stats, or PCH-split
 * north/south display and GT registers.  Ends with the driver's
 * received-interrupt counter and per-ring IMR/seqno information.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		/* VLV keeps its display interrupt registers at dedicated
		 * offsets and adds master/PM blocks. */
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		/* Legacy GMCH-style single interrupt register block. */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* PCH split: separate north (CPU) and south (PCH) display
		 * interrupt registers plus the GT block. */
		seq_printf(m, "North Display Interrupt enable:          %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:        %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:            %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:          %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:        %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:            %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:               %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:             %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:         %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):       %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
550
551 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
552 {
553         struct drm_info_node *node = (struct drm_info_node *) m->private;
554         struct drm_device *dev = node->minor->dev;
555         drm_i915_private_t *dev_priv = dev->dev_private;
556         int i, ret;
557
558         ret = mutex_lock_interruptible(&dev->struct_mutex);
559         if (ret)
560                 return ret;
561
562         seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
563         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
564         for (i = 0; i < dev_priv->num_fence_regs; i++) {
565                 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
566
567                 seq_printf(m, "Fence %d, pin count = %d, object = ",
568                            i, dev_priv->fence_regs[i].pin_count);
569                 if (obj == NULL)
570                         seq_puts(m, "unused");
571                 else
572                         describe_obj(m, obj);
573                 seq_putc(m, '\n');
574         }
575
576         mutex_unlock(&dev->struct_mutex);
577         return 0;
578 }
579
/*
 * debugfs: hex-dump the hardware status page of the ring selected via
 * info_ent->data, four u32 words per line prefixed by the byte offset.
 *
 * NOTE(review): the loop bound is 4096 / sizeof(u32) / 4 (= 256) with a
 * step of 4, so only the first 1KiB of the 4KiB status page is dumped -
 * presumably intentional (only the low words are of interest), but
 * worth confirming.
 */
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	/* No status page mapped (ring not initialized) - nothing to show. */
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
601
602 static ssize_t
603 i915_error_state_write(struct file *filp,
604                        const char __user *ubuf,
605                        size_t cnt,
606                        loff_t *ppos)
607 {
608         struct i915_error_state_file_priv *error_priv = filp->private_data;
609         struct drm_device *dev = error_priv->dev;
610         int ret;
611
612         DRM_DEBUG_DRIVER("Resetting error state\n");
613
614         ret = mutex_lock_interruptible(&dev->struct_mutex);
615         if (ret)
616                 return ret;
617
618         i915_destroy_error_state(dev);
619         mutex_unlock(&dev->struct_mutex);
620
621         return cnt;
622 }
623
624 static int i915_error_state_open(struct inode *inode, struct file *file)
625 {
626         struct drm_device *dev = inode->i_private;
627         struct i915_error_state_file_priv *error_priv;
628
629         error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
630         if (!error_priv)
631                 return -ENOMEM;
632
633         error_priv->dev = dev;
634
635         i915_error_state_get(dev, error_priv);
636
637         file->private_data = error_priv;
638
639         return 0;
640 }
641
642 static int i915_error_state_release(struct inode *inode, struct file *file)
643 {
644         struct i915_error_state_file_priv *error_priv = file->private_data;
645
646         i915_error_state_put(error_priv);
647         kfree(error_priv);
648
649         return 0;
650 }
651
/*
 * Read handler: format the captured error state into a temporary
 * buffer sized and positioned from (count, *pos), then copy it to
 * userspace and advance *pos.  Returns the first error encountered,
 * otherwise the number of bytes copied.
 */
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	/* tmp_pos is always 0: error_str.buf already starts at *pos. */
	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	/* GNU "?:": ret when non-zero (an error), else bytes copied. */
	return ret ?: ret_count;
}
681
/* debugfs i915_error_state: read dumps the state, any write clears it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
690
691 static int
692 i915_next_seqno_get(void *data, u64 *val)
693 {
694         struct drm_device *dev = data;
695         drm_i915_private_t *dev_priv = dev->dev_private;
696         int ret;
697
698         ret = mutex_lock_interruptible(&dev->struct_mutex);
699         if (ret)
700                 return ret;
701
702         *val = dev_priv->next_seqno;
703         mutex_unlock(&dev->struct_mutex);
704
705         return 0;
706 }
707
708 static int
709 i915_next_seqno_set(void *data, u64 val)
710 {
711         struct drm_device *dev = data;
712         int ret;
713
714         ret = mutex_lock_interruptible(&dev->struct_mutex);
715         if (ret)
716                 return ret;
717
718         ret = i915_gem_set_seqno(dev, val);
719         mutex_unlock(&dev->struct_mutex);
720
721         return ret;
722 }
723
/* Wire the get/set pair above into a debugfs attribute (hex format). */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
727
728 static int i915_rstdby_delays(struct seq_file *m, void *unused)
729 {
730         struct drm_info_node *node = (struct drm_info_node *) m->private;
731         struct drm_device *dev = node->minor->dev;
732         drm_i915_private_t *dev_priv = dev->dev_private;
733         u16 crstanddelay;
734         int ret;
735
736         ret = mutex_lock_interruptible(&dev->struct_mutex);
737         if (ret)
738                 return ret;
739
740         crstanddelay = I915_READ16(CRSTANDVID);
741
742         mutex_unlock(&dev->struct_mutex);
743
744         seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
745
746         return 0;
747 }
748
749 static int i915_cur_delayinfo(struct seq_file *m, void *unused)
750 {
751         struct drm_info_node *node = (struct drm_info_node *) m->private;
752         struct drm_device *dev = node->minor->dev;
753         drm_i915_private_t *dev_priv = dev->dev_private;
754         int ret;
755
756         if (IS_GEN5(dev)) {
757                 u16 rgvswctl = I915_READ16(MEMSWCTL);
758                 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
759
760                 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
761                 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
762                 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
763                            MEMSTAT_VID_SHIFT);
764                 seq_printf(m, "Current P-state: %d\n",
765                            (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
766         } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
767                 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
768                 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
769                 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
770                 u32 rpstat, cagf;
771                 u32 rpupei, rpcurup, rpprevup;
772                 u32 rpdownei, rpcurdown, rpprevdown;
773                 int max_freq;
774
775                 /* RPSTAT1 is in the GT power well */
776                 ret = mutex_lock_interruptible(&dev->struct_mutex);
777                 if (ret)
778                         return ret;
779
780                 gen6_gt_force_wake_get(dev_priv);
781
782                 rpstat = I915_READ(GEN6_RPSTAT1);
783                 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
784                 rpcurup = I915_READ(GEN6_RP_CUR_UP);
785                 rpprevup = I915_READ(GEN6_RP_PREV_UP);
786                 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
787                 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
788                 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
789                 if (IS_HASWELL(dev))
790                         cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
791                 else
792                         cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
793                 cagf *= GT_FREQUENCY_MULTIPLIER;
794
795                 gen6_gt_force_wake_put(dev_priv);
796                 mutex_unlock(&dev->struct_mutex);
797
798                 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
799                 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
800                 seq_printf(m, "Render p-state ratio: %d\n",
801                            (gt_perf_status & 0xff00) >> 8);
802                 seq_printf(m, "Render p-state VID: %d\n",
803                            gt_perf_status & 0xff);
804                 seq_printf(m, "Render p-state limit: %d\n",
805                            rp_state_limits & 0xff);
806                 seq_printf(m, "CAGF: %dMHz\n", cagf);
807                 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
808                            GEN6_CURICONT_MASK);
809                 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
810                            GEN6_CURBSYTAVG_MASK);
811                 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
812                            GEN6_CURBSYTAVG_MASK);
813                 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
814                            GEN6_CURIAVG_MASK);
815                 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
816                            GEN6_CURBSYTAVG_MASK);
817                 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
818                            GEN6_CURBSYTAVG_MASK);
819
820                 max_freq = (rp_state_cap & 0xff0000) >> 16;
821                 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
822                            max_freq * GT_FREQUENCY_MULTIPLIER);
823
824                 max_freq = (rp_state_cap & 0xff00) >> 8;
825                 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
826                            max_freq * GT_FREQUENCY_MULTIPLIER);
827
828                 max_freq = rp_state_cap & 0xff;
829                 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
830                            max_freq * GT_FREQUENCY_MULTIPLIER);
831
832                 seq_printf(m, "Max overclocked frequency: %dMHz\n",
833                            dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
834         } else if (IS_VALLEYVIEW(dev)) {
835                 u32 freq_sts, val;
836
837                 mutex_lock(&dev_priv->rps.hw_lock);
838                 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
839                 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
840                 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
841
842                 val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
843                 seq_printf(m, "max GPU freq: %d MHz\n",
844                            vlv_gpu_freq(dev_priv->mem_freq, val));
845
846                 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
847                 seq_printf(m, "min GPU freq: %d MHz\n",
848                            vlv_gpu_freq(dev_priv->mem_freq, val));
849
850                 seq_printf(m, "current GPU freq: %d MHz\n",
851                            vlv_gpu_freq(dev_priv->mem_freq,
852                                         (freq_sts >> 8) & 0xff));
853                 mutex_unlock(&dev_priv->rps.hw_lock);
854         } else {
855                 seq_puts(m, "no P-state info available\n");
856         }
857
858         return 0;
859 }
860
861 static int i915_delayfreq_table(struct seq_file *m, void *unused)
862 {
863         struct drm_info_node *node = (struct drm_info_node *) m->private;
864         struct drm_device *dev = node->minor->dev;
865         drm_i915_private_t *dev_priv = dev->dev_private;
866         u32 delayfreq;
867         int ret, i;
868
869         ret = mutex_lock_interruptible(&dev->struct_mutex);
870         if (ret)
871                 return ret;
872
873         for (i = 0; i < 16; i++) {
874                 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
875                 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
876                            (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
877         }
878
879         mutex_unlock(&dev->struct_mutex);
880
881         return 0;
882 }
883
/*
 * Convert a P-state VID map value to millivolts: map 0 is 1250mV and
 * each step subtracts 25mV.
 */
static inline int MAP_TO_MV(int map)
{
        return 1250 - 25 * map;
}
888
889 static int i915_inttoext_table(struct seq_file *m, void *unused)
890 {
891         struct drm_info_node *node = (struct drm_info_node *) m->private;
892         struct drm_device *dev = node->minor->dev;
893         drm_i915_private_t *dev_priv = dev->dev_private;
894         u32 inttoext;
895         int ret, i;
896
897         ret = mutex_lock_interruptible(&dev->struct_mutex);
898         if (ret)
899                 return ret;
900
901         for (i = 1; i <= 32; i++) {
902                 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
903                 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
904         }
905
906         mutex_unlock(&dev->struct_mutex);
907
908         return 0;
909 }
910
/*
 * Dump Ironlake DRPC (render power-state / render standby) state:
 * the MEMMODECTL configuration, the CRSTANDVID voltage IDs, and the
 * current RSx standby state decoded from RSTDBYCTL.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;

        /* Serialize the register snapshot against other device access. */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        /* Drop the lock before formatting output; only locals used below. */
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        /* NOTE(review): FMIN is printed unshifted, unlike FMAX/FSTART —
         * presumably the mask covers the low bits; confirm in i915_reg.h. */
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        /* RCX_SW_EXIT set means software forced standby exit => disabled. */
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
        seq_puts(m, "Current RS state: ");
        /* Decode the render standby status field of RSTDBYCTL. */
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}
977
/*
 * Dump gen6+ RC (render C-state) information: RP/RC control registers,
 * the current RC state from GT_CORE_STATUS, residency counters, and the
 * RC6 voltage IDs fetched from the pcode mailbox.
 */
static int gen6_drpc_info(struct seq_file *m)
{

        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count = 0, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        /* Sample the forcewake refcount under its spinlock. */
        spin_lock_irq(&dev_priv->gt_lock);
        forcewake_count = dev_priv->forcewake_count;
        spin_unlock_irq(&dev_priv->gt_lock);

        if (forcewake_count) {
                /* A held forcewake keeps the GT out of RC6, so the state
                 * we read below won't reflect normal operation. */
                seq_puts(m, "RC information inaccurate because somebody "
                            "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                /* Busy-wait (up to ~500us) for any in-flight forcewake ack
                 * to clear; count == 51 means we timed out. */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
                        udelay(10);
                seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
        }

        /* Raw readl + manual trace, deliberately bypassing the I915_READ
         * wrapper (which would grab forcewake). */
        gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        mutex_unlock(&dev->struct_mutex);
        /* The pcode mailbox has its own lock (rps.hw_lock). */
        mutex_lock(&dev_priv->rps.hw_lock);
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
        mutex_unlock(&dev_priv->rps.hw_lock);

        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_ENABLE));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
                          GEN6_RP_MEDIA_SW_MODE));
        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_puts(m, "Current RC state: ");
        /* Decode the RC state field of GT_CORE_STATUS. */
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_puts(m, "Core Power Down\n");
                else
                        seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
                seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_puts(m, "RC7\n");
                break;
        default:
                seq_puts(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

        /* Not exactly sure what this is */
        seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
        seq_printf(m, "RC6 residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6));
        seq_printf(m, "RC6+ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6p));
        seq_printf(m, "RC6++ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6pp));

        /* rc6vids packs three 8-bit VIDs: RC6 / RC6+ / RC6++. */
        seq_printf(m, "RC6   voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
        seq_printf(m, "RC6+  voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
        seq_printf(m, "RC6++ voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        return 0;
}
1074
1075 static int i915_drpc_info(struct seq_file *m, void *unused)
1076 {
1077         struct drm_info_node *node = (struct drm_info_node *) m->private;
1078         struct drm_device *dev = node->minor->dev;
1079
1080         if (IS_GEN6(dev) || IS_GEN7(dev))
1081                 return gen6_drpc_info(m);
1082         else
1083                 return ironlake_drpc_info(m);
1084 }
1085
/*
 * Report whether framebuffer compression is enabled; when disabled,
 * decode dev_priv->fbc.no_fbc_reason into a human-readable explanation.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_puts(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_puts(m, "FBC enabled\n");
        } else {
                seq_puts(m, "FBC disabled: ");
                /* Reason recorded by the most recent FBC enable attempt. */
                switch (dev_priv->fbc.no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_puts(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_puts(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_puts(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_puts(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_puts(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_puts(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_puts(m, "multiple pipes are enabled");
                        break;
                case FBC_MODULE_PARAM:
                        seq_puts(m, "disabled per module param (default off)");
                        break;
                case FBC_CHIP_DEFAULT:
                        seq_puts(m, "disabled per chip default");
                        break;
                default:
                        seq_puts(m, "unknown reason");
                }
                seq_putc(m, '\n');
        }
        return 0;
}
1136
1137 static int i915_ips_status(struct seq_file *m, void *unused)
1138 {
1139         struct drm_info_node *node = (struct drm_info_node *) m->private;
1140         struct drm_device *dev = node->minor->dev;
1141         struct drm_i915_private *dev_priv = dev->dev_private;
1142
1143         if (!HAS_IPS(dev)) {
1144                 seq_puts(m, "not supported\n");
1145                 return 0;
1146         }
1147
1148         if (I915_READ(IPS_CTL) & IPS_ENABLE)
1149                 seq_puts(m, "enabled\n");
1150         else
1151                 seq_puts(m, "disabled\n");
1152
1153         return 0;
1154 }
1155
1156 static int i915_sr_status(struct seq_file *m, void *unused)
1157 {
1158         struct drm_info_node *node = (struct drm_info_node *) m->private;
1159         struct drm_device *dev = node->minor->dev;
1160         drm_i915_private_t *dev_priv = dev->dev_private;
1161         bool sr_enabled = false;
1162
1163         if (HAS_PCH_SPLIT(dev))
1164                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1165         else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1166                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1167         else if (IS_I915GM(dev))
1168                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1169         else if (IS_PINEVIEW(dev))
1170                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1171
1172         seq_printf(m, "self-refresh: %s\n",
1173                    sr_enabled ? "enabled" : "disabled");
1174
1175         return 0;
1176 }
1177
1178 static int i915_emon_status(struct seq_file *m, void *unused)
1179 {
1180         struct drm_info_node *node = (struct drm_info_node *) m->private;
1181         struct drm_device *dev = node->minor->dev;
1182         drm_i915_private_t *dev_priv = dev->dev_private;
1183         unsigned long temp, chipset, gfx;
1184         int ret;
1185
1186         if (!IS_GEN5(dev))
1187                 return -ENODEV;
1188
1189         ret = mutex_lock_interruptible(&dev->struct_mutex);
1190         if (ret)
1191                 return ret;
1192
1193         temp = i915_mch_val(dev_priv);
1194         chipset = i915_chipset_val(dev_priv);
1195         gfx = i915_gfx_val(dev_priv);
1196         mutex_unlock(&dev->struct_mutex);
1197
1198         seq_printf(m, "GMCH temp: %ld\n", temp);
1199         seq_printf(m, "Chipset power: %ld\n", chipset);
1200         seq_printf(m, "GFX power: %ld\n", gfx);
1201         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1202
1203         return 0;
1204 }
1205
1206 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1207 {
1208         struct drm_info_node *node = (struct drm_info_node *) m->private;
1209         struct drm_device *dev = node->minor->dev;
1210         drm_i915_private_t *dev_priv = dev->dev_private;
1211         int ret;
1212         int gpu_freq, ia_freq;
1213
1214         if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1215                 seq_puts(m, "unsupported on this chipset\n");
1216                 return 0;
1217         }
1218
1219         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1220         if (ret)
1221                 return ret;
1222
1223         seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1224
1225         for (gpu_freq = dev_priv->rps.min_delay;
1226              gpu_freq <= dev_priv->rps.max_delay;
1227              gpu_freq++) {
1228                 ia_freq = gpu_freq;
1229                 sandybridge_pcode_read(dev_priv,
1230                                        GEN6_PCODE_READ_MIN_FREQ_TABLE,
1231                                        &ia_freq);
1232                 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1233                            gpu_freq * GT_FREQUENCY_MULTIPLIER,
1234                            ((ia_freq >> 0) & 0xff) * 100,
1235                            ((ia_freq >> 8) & 0xff) * 100);
1236         }
1237
1238         mutex_unlock(&dev_priv->rps.hw_lock);
1239
1240         return 0;
1241 }
1242
1243 static int i915_gfxec(struct seq_file *m, void *unused)
1244 {
1245         struct drm_info_node *node = (struct drm_info_node *) m->private;
1246         struct drm_device *dev = node->minor->dev;
1247         drm_i915_private_t *dev_priv = dev->dev_private;
1248         int ret;
1249
1250         ret = mutex_lock_interruptible(&dev->struct_mutex);
1251         if (ret)
1252                 return ret;
1253
1254         seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1255
1256         mutex_unlock(&dev->struct_mutex);
1257
1258         return 0;
1259 }
1260
1261 static int i915_opregion(struct seq_file *m, void *unused)
1262 {
1263         struct drm_info_node *node = (struct drm_info_node *) m->private;
1264         struct drm_device *dev = node->minor->dev;
1265         drm_i915_private_t *dev_priv = dev->dev_private;
1266         struct intel_opregion *opregion = &dev_priv->opregion;
1267         void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1268         int ret;
1269
1270         if (data == NULL)
1271                 return -ENOMEM;
1272
1273         ret = mutex_lock_interruptible(&dev->struct_mutex);
1274         if (ret)
1275                 goto out;
1276
1277         if (opregion->header) {
1278                 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1279                 seq_write(m, data, OPREGION_SIZE);
1280         }
1281
1282         mutex_unlock(&dev->struct_mutex);
1283
1284 out:
1285         kfree(data);
1286         return 0;
1287 }
1288
1289 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1290 {
1291         struct drm_info_node *node = (struct drm_info_node *) m->private;
1292         struct drm_device *dev = node->minor->dev;
1293         drm_i915_private_t *dev_priv = dev->dev_private;
1294         struct intel_fbdev *ifbdev;
1295         struct intel_framebuffer *fb;
1296         int ret;
1297
1298         ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1299         if (ret)
1300                 return ret;
1301
1302         ifbdev = dev_priv->fbdev;
1303         fb = to_intel_framebuffer(ifbdev->helper.fb);
1304
1305         seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1306                    fb->base.width,
1307                    fb->base.height,
1308                    fb->base.depth,
1309                    fb->base.bits_per_pixel,
1310                    atomic_read(&fb->base.refcount.refcount));
1311         describe_obj(m, fb->obj);
1312         seq_putc(m, '\n');
1313         mutex_unlock(&dev->mode_config.mutex);
1314
1315         mutex_lock(&dev->mode_config.fb_lock);
1316         list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1317                 if (&fb->base == ifbdev->helper.fb)
1318                         continue;
1319
1320                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1321                            fb->base.width,
1322                            fb->base.height,
1323                            fb->base.depth,
1324                            fb->base.bits_per_pixel,
1325                            atomic_read(&fb->base.refcount.refcount));
1326                 describe_obj(m, fb->obj);
1327                 seq_putc(m, '\n');
1328         }
1329         mutex_unlock(&dev->mode_config.fb_lock);
1330
1331         return 0;
1332 }
1333
1334 static int i915_context_status(struct seq_file *m, void *unused)
1335 {
1336         struct drm_info_node *node = (struct drm_info_node *) m->private;
1337         struct drm_device *dev = node->minor->dev;
1338         drm_i915_private_t *dev_priv = dev->dev_private;
1339         struct intel_ring_buffer *ring;
1340         int ret, i;
1341
1342         ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1343         if (ret)
1344                 return ret;
1345
1346         if (dev_priv->ips.pwrctx) {
1347                 seq_puts(m, "power context ");
1348                 describe_obj(m, dev_priv->ips.pwrctx);
1349                 seq_putc(m, '\n');
1350         }
1351
1352         if (dev_priv->ips.renderctx) {
1353                 seq_puts(m, "render context ");
1354                 describe_obj(m, dev_priv->ips.renderctx);
1355                 seq_putc(m, '\n');
1356         }
1357
1358         for_each_ring(ring, dev_priv, i) {
1359                 if (ring->default_context) {
1360                         seq_printf(m, "HW default context %s ring ", ring->name);
1361                         describe_obj(m, ring->default_context->obj);
1362                         seq_putc(m, '\n');
1363                 }
1364         }
1365
1366         mutex_unlock(&dev->mode_config.mutex);
1367
1368         return 0;
1369 }
1370
1371 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1372 {
1373         struct drm_info_node *node = (struct drm_info_node *) m->private;
1374         struct drm_device *dev = node->minor->dev;
1375         struct drm_i915_private *dev_priv = dev->dev_private;
1376         unsigned forcewake_count;
1377
1378         spin_lock_irq(&dev_priv->gt_lock);
1379         forcewake_count = dev_priv->forcewake_count;
1380         spin_unlock_irq(&dev_priv->gt_lock);
1381
1382         seq_printf(m, "forcewake count = %u\n", forcewake_count);
1383
1384         return 0;
1385 }
1386
1387 static const char *swizzle_string(unsigned swizzle)
1388 {
1389         switch (swizzle) {
1390         case I915_BIT_6_SWIZZLE_NONE:
1391                 return "none";
1392         case I915_BIT_6_SWIZZLE_9:
1393                 return "bit9";
1394         case I915_BIT_6_SWIZZLE_9_10:
1395                 return "bit9/bit10";
1396         case I915_BIT_6_SWIZZLE_9_11:
1397                 return "bit9/bit11";
1398         case I915_BIT_6_SWIZZLE_9_10_11:
1399                 return "bit9/bit10/bit11";
1400         case I915_BIT_6_SWIZZLE_9_17:
1401                 return "bit9/bit17";
1402         case I915_BIT_6_SWIZZLE_9_10_17:
1403                 return "bit9/bit10/bit17";
1404         case I915_BIT_6_SWIZZLE_UNKNOWN:
1405                 return "unknown";
1406         }
1407
1408         return "bug";
1409 }
1410
/*
 * Report the detected bit-6 swizzling mode for X and Y tiling, plus the
 * raw memory-controller registers the detection is based on (which
 * registers exist depends on the hardware generation).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

        if (IS_GEN3(dev) || IS_GEN4(dev)) {
                /* NOTE(review): label says "DDC" but the register read is
                 * DCC — looks like a label typo, kept as-is since debugfs
                 * output may be parsed by tools. */
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           I915_READ16(C1DRB3));
        } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C1));
                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
                seq_printf(m, "ARB_MODE = 0x%08x\n",
                           I915_READ(ARB_MODE));
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           I915_READ(DISP_ARB_CTL));
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
1452
1453 static int i915_ppgtt_info(struct seq_file *m, void *data)
1454 {
1455         struct drm_info_node *node = (struct drm_info_node *) m->private;
1456         struct drm_device *dev = node->minor->dev;
1457         struct drm_i915_private *dev_priv = dev->dev_private;
1458         struct intel_ring_buffer *ring;
1459         int i, ret;
1460
1461
1462         ret = mutex_lock_interruptible(&dev->struct_mutex);
1463         if (ret)
1464                 return ret;
1465         if (INTEL_INFO(dev)->gen == 6)
1466                 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1467
1468         for_each_ring(ring, dev_priv, i) {
1469                 seq_printf(m, "%s\n", ring->name);
1470                 if (INTEL_INFO(dev)->gen == 7)
1471                         seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1472                 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1473                 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1474                 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1475         }
1476         if (dev_priv->mm.aliasing_ppgtt) {
1477                 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1478
1479                 seq_puts(m, "aliasing PPGTT:\n");
1480                 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1481         }
1482         seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1483         mutex_unlock(&dev->struct_mutex);
1484
1485         return 0;
1486 }
1487
1488 static int i915_dpio_info(struct seq_file *m, void *data)
1489 {
1490         struct drm_info_node *node = (struct drm_info_node *) m->private;
1491         struct drm_device *dev = node->minor->dev;
1492         struct drm_i915_private *dev_priv = dev->dev_private;
1493         int ret;
1494
1495
1496         if (!IS_VALLEYVIEW(dev)) {
1497                 seq_puts(m, "unsupported\n");
1498                 return 0;
1499         }
1500
1501         ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
1502         if (ret)
1503                 return ret;
1504
1505         seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1506
1507         seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1508                    vlv_dpio_read(dev_priv, _DPIO_DIV_A));
1509         seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1510                    vlv_dpio_read(dev_priv, _DPIO_DIV_B));
1511
1512         seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1513                    vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
1514         seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1515                    vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
1516
1517         seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1518                    vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
1519         seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1520                    vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
1521
1522         seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1523                    vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
1524         seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1525                    vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
1526
1527         seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1528                    vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
1529
1530         mutex_unlock(&dev_priv->dpio_lock);
1531
1532         return 0;
1533 }
1534
1535 static int i915_llc(struct seq_file *m, void *data)
1536 {
1537         struct drm_info_node *node = (struct drm_info_node *) m->private;
1538         struct drm_device *dev = node->minor->dev;
1539         struct drm_i915_private *dev_priv = dev->dev_private;
1540
1541         /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1542         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1543         seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1544
1545         return 0;
1546 }
1547
1548 static int i915_edp_psr_status(struct seq_file *m, void *data)
1549 {
1550         struct drm_info_node *node = m->private;
1551         struct drm_device *dev = node->minor->dev;
1552         struct drm_i915_private *dev_priv = dev->dev_private;
1553         u32 psrstat, psrperf;
1554
1555         if (!IS_HASWELL(dev)) {
1556                 seq_puts(m, "PSR not supported on this platform\n");
1557         } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1558                 seq_puts(m, "PSR enabled\n");
1559         } else {
1560                 seq_puts(m, "PSR disabled: ");
1561                 switch (dev_priv->no_psr_reason) {
1562                 case PSR_NO_SOURCE:
1563                         seq_puts(m, "not supported on this platform");
1564                         break;
1565                 case PSR_NO_SINK:
1566                         seq_puts(m, "not supported by panel");
1567                         break;
1568                 case PSR_MODULE_PARAM:
1569                         seq_puts(m, "disabled by flag");
1570                         break;
1571                 case PSR_CRTC_NOT_ACTIVE:
1572                         seq_puts(m, "crtc not active");
1573                         break;
1574                 case PSR_PWR_WELL_ENABLED:
1575                         seq_puts(m, "power well enabled");
1576                         break;
1577                 case PSR_NOT_TILED:
1578                         seq_puts(m, "not tiled");
1579                         break;
1580                 case PSR_SPRITE_ENABLED:
1581                         seq_puts(m, "sprite enabled");
1582                         break;
1583                 case PSR_S3D_ENABLED:
1584                         seq_puts(m, "stereo 3d enabled");
1585                         break;
1586                 case PSR_INTERLACED_ENABLED:
1587                         seq_puts(m, "interlaced enabled");
1588                         break;
1589                 case PSR_HSW_NOT_DDIA:
1590                         seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1591                         break;
1592                 default:
1593                         seq_puts(m, "unknown reason");
1594                 }
1595                 seq_puts(m, "\n");
1596                 return 0;
1597         }
1598
1599         psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1600
1601         seq_puts(m, "PSR Current State: ");
1602         switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1603         case EDP_PSR_STATUS_STATE_IDLE:
1604                 seq_puts(m, "Reset state\n");
1605                 break;
1606         case EDP_PSR_STATUS_STATE_SRDONACK:
1607                 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1608                 break;
1609         case EDP_PSR_STATUS_STATE_SRDENT:
1610                 seq_puts(m, "SRD entry\n");
1611                 break;
1612         case EDP_PSR_STATUS_STATE_BUFOFF:
1613                 seq_puts(m, "Wait for buffer turn off\n");
1614                 break;
1615         case EDP_PSR_STATUS_STATE_BUFON:
1616                 seq_puts(m, "Wait for buffer turn on\n");
1617                 break;
1618         case EDP_PSR_STATUS_STATE_AUXACK:
1619                 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1620                 break;
1621         case EDP_PSR_STATUS_STATE_SRDOFFACK:
1622                 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1623                 break;
1624         default:
1625                 seq_puts(m, "Unknown\n");
1626                 break;
1627         }
1628
1629         seq_puts(m, "Link Status: ");
1630         switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
1631         case EDP_PSR_STATUS_LINK_FULL_OFF:
1632                 seq_puts(m, "Link is fully off\n");
1633                 break;
1634         case EDP_PSR_STATUS_LINK_FULL_ON:
1635                 seq_puts(m, "Link is fully on\n");
1636                 break;
1637         case EDP_PSR_STATUS_LINK_STANDBY:
1638                 seq_puts(m, "Link is in standby\n");
1639                 break;
1640         default:
1641                 seq_puts(m, "Unknown\n");
1642                 break;
1643         }
1644
1645         seq_printf(m, "PSR Entry Count: %u\n",
1646                    psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
1647                    EDP_PSR_STATUS_COUNT_MASK);
1648
1649         seq_printf(m, "Max Sleep Timer Counter: %u\n",
1650                    psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
1651                    EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
1652
1653         seq_printf(m, "Had AUX error: %s\n",
1654                    yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
1655
1656         seq_printf(m, "Sending AUX: %s\n",
1657                    yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
1658
1659         seq_printf(m, "Sending Idle: %s\n",
1660                    yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
1661
1662         seq_printf(m, "Sending TP2 TP3: %s\n",
1663                    yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
1664
1665         seq_printf(m, "Sending TP1: %s\n",
1666                    yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
1667
1668         seq_printf(m, "Idle Count: %u\n",
1669                    psrstat & EDP_PSR_STATUS_IDLE_MASK);
1670
1671         psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
1672         seq_printf(m, "Performance Counter: %u\n", psrperf);
1673
1674         return 0;
1675 }
1676
1677 static int
1678 i915_wedged_get(void *data, u64 *val)
1679 {
1680         struct drm_device *dev = data;
1681         drm_i915_private_t *dev_priv = dev->dev_private;
1682
1683         *val = atomic_read(&dev_priv->gpu_error.reset_counter);
1684
1685         return 0;
1686 }
1687
1688 static int
1689 i915_wedged_set(void *data, u64 val)
1690 {
1691         struct drm_device *dev = data;
1692
1693         DRM_INFO("Manually setting wedged to %llu\n", val);
1694         i915_handle_error(dev, val);
1695
1696         return 0;
1697 }
1698
1699 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1700                         i915_wedged_get, i915_wedged_set,
1701                         "%llu\n");
1702
1703 static int
1704 i915_ring_stop_get(void *data, u64 *val)
1705 {
1706         struct drm_device *dev = data;
1707         drm_i915_private_t *dev_priv = dev->dev_private;
1708
1709         *val = dev_priv->gpu_error.stop_rings;
1710
1711         return 0;
1712 }
1713
1714 static int
1715 i915_ring_stop_set(void *data, u64 val)
1716 {
1717         struct drm_device *dev = data;
1718         struct drm_i915_private *dev_priv = dev->dev_private;
1719         int ret;
1720
1721         DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
1722
1723         ret = mutex_lock_interruptible(&dev->struct_mutex);
1724         if (ret)
1725                 return ret;
1726
1727         dev_priv->gpu_error.stop_rings = val;
1728         mutex_unlock(&dev->struct_mutex);
1729
1730         return 0;
1731 }
1732
1733 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1734                         i915_ring_stop_get, i915_ring_stop_set,
1735                         "0x%08llx\n");
1736
/* Bitmask flags accepted by the i915_gem_drop_caches debugfs file. */
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
                  DROP_BOUND | \
                  DROP_RETIRE | \
                  DROP_ACTIVE)
/*
 * Read side of the drop-caches file: report the full mask so userspace
 * can discover which DROP_* bits this kernel supports.
 */
static int
i915_drop_caches_get(void *data, u64 *val)
{
        *val = DROP_ALL;

        return 0;
}
1752
/*
 * Write side of the drop-caches file: flush GEM state according to the
 * DROP_* flags in @val, all under struct_mutex.
 *
 * Order matters: idle the GPU first (DROP_ACTIVE) so objects retire to
 * the inactive list, then retire requests, then unbind unpinned
 * inactive objects (DROP_BOUND), and finally release the backing pages
 * of unbound objects (DROP_UNBOUND).
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
        struct i915_address_space *vm = &dev_priv->gtt.base;
        int ret;

        DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (val & DROP_ACTIVE) {
                /* Wait for all outstanding GPU work before touching lists. */
                ret = i915_gpu_idle(dev);
                if (ret)
                        goto unlock;
        }

        if (val & (DROP_RETIRE | DROP_ACTIVE))
                i915_gem_retire_requests(dev);

        if (val & DROP_BOUND) {
                /* _safe iteration: unbinding removes obj from this list. */
                list_for_each_entry_safe(obj, next, &vm->inactive_list,
                                         mm_list)
                        if (obj->pin_count == 0) {
                                ret = i915_gem_object_unbind(obj);
                                if (ret)
                                        goto unlock;
                        }
        }

        if (val & DROP_UNBOUND) {
                /* _safe iteration: put_pages may drop obj from this list. */
                list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
                                         global_list)
                        if (obj->pages_pin_count == 0) {
                                ret = i915_gem_object_put_pages(obj);
                                if (ret)
                                        goto unlock;
                        }
        }

unlock:
        mutex_unlock(&dev->struct_mutex);

        /* ret is 0 on full success, or the first failure encountered. */
        return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
                        i915_drop_caches_get, i915_drop_caches_set,
                        "0x%08llx\n");
1808
1809 static int
1810 i915_max_freq_get(void *data, u64 *val)
1811 {
1812         struct drm_device *dev = data;
1813         drm_i915_private_t *dev_priv = dev->dev_private;
1814         int ret;
1815
1816         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1817                 return -ENODEV;
1818
1819         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1820         if (ret)
1821                 return ret;
1822
1823         if (IS_VALLEYVIEW(dev))
1824                 *val = vlv_gpu_freq(dev_priv->mem_freq,
1825                                     dev_priv->rps.max_delay);
1826         else
1827                 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
1828         mutex_unlock(&dev_priv->rps.hw_lock);
1829
1830         return 0;
1831 }
1832
1833 static int
1834 i915_max_freq_set(void *data, u64 val)
1835 {
1836         struct drm_device *dev = data;
1837         struct drm_i915_private *dev_priv = dev->dev_private;
1838         int ret;
1839
1840         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1841                 return -ENODEV;
1842
1843         DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
1844
1845         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1846         if (ret)
1847                 return ret;
1848
1849         /*
1850          * Turbo will still be enabled, but won't go above the set value.
1851          */
1852         if (IS_VALLEYVIEW(dev)) {
1853                 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1854                 dev_priv->rps.max_delay = val;
1855                 gen6_set_rps(dev, val);
1856         } else {
1857                 do_div(val, GT_FREQUENCY_MULTIPLIER);
1858                 dev_priv->rps.max_delay = val;
1859                 gen6_set_rps(dev, val);
1860         }
1861
1862         mutex_unlock(&dev_priv->rps.hw_lock);
1863
1864         return 0;
1865 }
1866
1867 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
1868                         i915_max_freq_get, i915_max_freq_set,
1869                         "%llu\n");
1870
1871 static int
1872 i915_min_freq_get(void *data, u64 *val)
1873 {
1874         struct drm_device *dev = data;
1875         drm_i915_private_t *dev_priv = dev->dev_private;
1876         int ret;
1877
1878         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1879                 return -ENODEV;
1880
1881         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1882         if (ret)
1883                 return ret;
1884
1885         if (IS_VALLEYVIEW(dev))
1886                 *val = vlv_gpu_freq(dev_priv->mem_freq,
1887                                     dev_priv->rps.min_delay);
1888         else
1889                 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
1890         mutex_unlock(&dev_priv->rps.hw_lock);
1891
1892         return 0;
1893 }
1894
1895 static int
1896 i915_min_freq_set(void *data, u64 val)
1897 {
1898         struct drm_device *dev = data;
1899         struct drm_i915_private *dev_priv = dev->dev_private;
1900         int ret;
1901
1902         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1903                 return -ENODEV;
1904
1905         DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
1906
1907         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1908         if (ret)
1909                 return ret;
1910
1911         /*
1912          * Turbo will still be enabled, but won't go below the set value.
1913          */
1914         if (IS_VALLEYVIEW(dev)) {
1915                 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1916                 dev_priv->rps.min_delay = val;
1917                 valleyview_set_rps(dev, val);
1918         } else {
1919                 do_div(val, GT_FREQUENCY_MULTIPLIER);
1920                 dev_priv->rps.min_delay = val;
1921                 gen6_set_rps(dev, val);
1922         }
1923         mutex_unlock(&dev_priv->rps.hw_lock);
1924
1925         return 0;
1926 }
1927
1928 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
1929                         i915_min_freq_get, i915_min_freq_set,
1930                         "%llu\n");
1931
1932 static int
1933 i915_cache_sharing_get(void *data, u64 *val)
1934 {
1935         struct drm_device *dev = data;
1936         drm_i915_private_t *dev_priv = dev->dev_private;
1937         u32 snpcr;
1938         int ret;
1939
1940         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1941                 return -ENODEV;
1942
1943         ret = mutex_lock_interruptible(&dev->struct_mutex);
1944         if (ret)
1945                 return ret;
1946
1947         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1948         mutex_unlock(&dev_priv->dev->struct_mutex);
1949
1950         *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
1951
1952         return 0;
1953 }
1954
/*
 * Program the uncore snoop/cache-sharing policy (0-3) into
 * GEN6_MBCUNIT_SNPCR via read-modify-write.
 *
 * NOTE(review): unlike i915_cache_sharing_get() this does not take
 * struct_mutex around the register RMW - confirm that is intentional.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 snpcr;

        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;

        /* Only a 2-bit field: reject anything wider. */
        if (val > 3)
                return -EINVAL;

        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

        /* Update the cache sharing policy here as well */
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
1982
1983 /* As the drm_debugfs_init() routines are called before dev->dev_private is
1984  * allocated we need to hook into the minor for release. */
1985 static int
1986 drm_add_fake_info_node(struct drm_minor *minor,
1987                        struct dentry *ent,
1988                        const void *key)
1989 {
1990         struct drm_info_node *node;
1991
1992         node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
1993         if (node == NULL) {
1994                 debugfs_remove(ent);
1995                 return -ENOMEM;
1996         }
1997
1998         node->minor = minor;
1999         node->dent = ent;
2000         node->info_ent = (void *) key;
2001
2002         mutex_lock(&minor->debugfs_lock);
2003         list_add(&node->list, &minor->debugfs_list);
2004         mutex_unlock(&minor->debugfs_lock);
2005
2006         return 0;
2007 }
2008
2009 static int i915_forcewake_open(struct inode *inode, struct file *file)
2010 {
2011         struct drm_device *dev = inode->i_private;
2012         struct drm_i915_private *dev_priv = dev->dev_private;
2013
2014         if (INTEL_INFO(dev)->gen < 6)
2015                 return 0;
2016
2017         gen6_gt_force_wake_get(dev_priv);
2018
2019         return 0;
2020 }
2021
2022 static int i915_forcewake_release(struct inode *inode, struct file *file)
2023 {
2024         struct drm_device *dev = inode->i_private;
2025         struct drm_i915_private *dev_priv = dev->dev_private;
2026
2027         if (INTEL_INFO(dev)->gen < 6)
2028                 return 0;
2029
2030         gen6_gt_force_wake_put(dev_priv);
2031
2032         return 0;
2033 }
2034
/*
 * fops for the i915_forcewake_user debugfs file: keeping the file open
 * holds a GT forcewake reference (see i915_forcewake_open/release).
 */
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
2040
2041 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2042 {
2043         struct drm_device *dev = minor->dev;
2044         struct dentry *ent;
2045
2046         ent = debugfs_create_file("i915_forcewake_user",
2047                                   S_IRUSR,
2048                                   root, dev,
2049                                   &i915_forcewake_fops);
2050         if (IS_ERR(ent))
2051                 return PTR_ERR(ent);
2052
2053         return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
2054 }
2055
2056 static int i915_debugfs_create(struct dentry *root,
2057                                struct drm_minor *minor,
2058                                const char *name,
2059                                const struct file_operations *fops)
2060 {
2061         struct drm_device *dev = minor->dev;
2062         struct dentry *ent;
2063
2064         ent = debugfs_create_file(name,
2065                                   S_IRUGO | S_IWUSR,
2066                                   root, dev,
2067                                   fops);
2068         if (IS_ERR(ent))
2069                 return PTR_ERR(ent);
2070
2071         return drm_add_fake_info_node(minor, ent, fops);
2072 }
2073
/*
 * Read-only informational debugfs files, registered in bulk via
 * drm_debugfs_create_files().  The optional fourth field is passed to
 * the show callback as node->info_ent->data (e.g. list/ring selectors).
 */
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
        {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
        {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
        {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_ips_status", i915_ips_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_dpio", i915_dpio_info, 0},
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2112
2113 struct i915_debugfs_files {
2114         const char *name;
2115         const struct file_operations *fops;
2116 } i915_debugfs_files[] = {
2117         {"i915_wedged", &i915_wedged_fops},
2118         {"i915_max_freq", &i915_max_freq_fops},
2119         {"i915_min_freq", &i915_min_freq_fops},
2120         {"i915_cache_sharing", &i915_cache_sharing_fops},
2121         {"i915_ring_stop", &i915_ring_stop_fops},
2122         {"i915_gem_drop_caches", &i915_drop_caches_fops},
2123         {"i915_error_state", &i915_error_state_fops},
2124         {"i915_next_seqno", &i915_next_seqno_fops},
2125 };
2126
2127 int i915_debugfs_init(struct drm_minor *minor)
2128 {
2129         int ret, i;
2130
2131         ret = i915_forcewake_create(minor->debugfs_root, minor);
2132         if (ret)
2133                 return ret;
2134
2135         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2136                 ret = i915_debugfs_create(minor->debugfs_root, minor,
2137                                           i915_debugfs_files[i].name,
2138                                           i915_debugfs_files[i].fops);
2139                 if (ret)
2140                         return ret;
2141         }
2142
2143         return drm_debugfs_create_files(i915_debugfs_list,
2144                                         I915_DEBUGFS_ENTRIES,
2145                                         minor->debugfs_root, minor);
2146 }
2147
void i915_debugfs_cleanup(struct drm_minor *minor)
{
        int i;

        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        /* The remaining files were registered through
         * drm_add_fake_info_node() with their fops pointer stored as the
         * fake node's info_ent; passing the same pointer (cast to
         * drm_info_list) lets drm_debugfs_remove_files() match and free
         * those fake nodes.  The cast is the key-lookup trick, not a real
         * drm_info_list. */
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                struct drm_info_list *info_list =
                        (struct drm_info_list *) i915_debugfs_files[i].fops;

                drm_debugfs_remove_files(info_list, 1, minor);
        }
}
2163
2164 #endif /* CONFIG_DEBUG_FS */