/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"
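/*
 * calc_residency() converts the raw RC6 residency counter into
 * milliseconds.  On most GEN6+ parts the counter ticks in 1.28us units;
 * on Valleyview it ticks in CZ clock units, so the scaling factor is
 * derived from the CZ count field instead.
 */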
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;

	if (!intel_enable_rc6(dev))
		return 0;

	/* On VLV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		u32 clkctl2;

		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
			CLK_CTL2_CZCOUNT_30NS_SHIFT;
		if (!clkctl2) {
			WARN(!clkctl2, "bogus CZ count value");
			return 0;
		}
		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;

		div = 1000000ULL * bias;
	}

	raw_time = I915_READ(reg) * units;
	return DIV_ROUND_UP_ULL(raw_time, div);
}
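/*
 * The rc6* attributes below live under the device's "power" group (see
 * rc6_attr_group): rc6_enable reports the enabled RC6 state mask, while
 * the *_residency_ms files report the time spent in RC6, RC6p and RC6pp.
 */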
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6p_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	if (IS_VALLEYVIEW(dminor->dev))
		rc6pp_residency = 0;
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};
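/*
 * L3 parity (DPF) support: userspace can read and update the per-slice
 * L3 remapping table through the l3_parity* binary attributes.  Offsets
 * must stay within GEN7_L3LOG_SIZE and are validated here before any
 * hardware or driver state is touched.
 */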
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;
	if (offset % 4 != 0)
		return -EINVAL;
	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;
	return 0;
}
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);
	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct i915_hw_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	if (dev_priv->hw_contexts_disabled)
		return -ENXIO;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}
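/*
 * One binary attribute per L3 slice; the slice index is carried in
 * ->private and recovered in i915_l3_read()/i915_l3_write().
 */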
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
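/*
 * RPS (render P-state) frequency controls.  Values are exposed to
 * userspace in MHz; internally the driver tracks them in hardware
 * "delay" units, hence the GT_FREQUENCY_MULTIPLIER scaling and the
 * vlv_gpu_freq()/vlv_freq_opcode() conversions on Valleyview.
 */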
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
	} else {
		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			vlv_gpu_freq(dev_priv->mem_freq,
				     dev_priv->rps.rpe_delay));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
	else
		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
		non_oc_max = hw_max;
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		non_oc_max = (rp_state_cap & 0xff);
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > non_oc_max)
		DRM_DEBUG("User requested overclocking to %d\n",
			  val * GT_FREQUENCY_MULTIPLIER);

	if (dev_priv->rps.cur_delay > val) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.max_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev))
		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
	else
		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap, hw_max, hw_min;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);

		hw_max = valleyview_rps_max_freq(dev_priv);
		hw_min = valleyview_rps_min_freq(dev_priv);
	} else {
		val /= GT_FREQUENCY_MULTIPLIER;

		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		hw_max = dev_priv->rps.hw_max;
		hw_min = ((rp_state_cap & 0xff0000) >> 16);
	}

	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (dev_priv->rps.cur_delay < val) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev, val);
		else
			gen6_set_rps(dev_priv->dev, val);
	}

	dev_priv->rps.min_delay = val;

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
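/*
 * RP0/RP1/RPn report the frequency capabilities read back from
 * GEN6_RP_STATE_CAP: RP0 is the maximum non-overclocked frequency,
 * RP1 the efficient frequency and RPn the minimum frequency.
 */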
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, rp_state_cap;
	ssize_t ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	mutex_unlock(&dev->struct_mutex);

	if (attr == &dev_attr_gt_RP0_freq_mhz) {
		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
	} else {
		BUG();
	}
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
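/*
 * The "error" binary attribute exposes the last captured GPU error state.
 * Reading returns the textual dump; writing anything to it clears the
 * saved state.
 */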
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}
static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
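/*
 * Register all of the above on the DRM card device so the files show up
 * beneath /sys/class/drm/card<N>/.  Failures are logged but are not
 * fatal to driver load.
 */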
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

	if (INTEL_INFO(dev)->gen >= 6) {
		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(&dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}
void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);

	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
}