/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */

#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"

#include <subdev/vm.h>
#include <engine/fifo.h>
#include "nouveau_fence.h"

/*
 * NV10-NV40 tiling helpers
 */

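/*
 * (Re)program a single tile region and notify every engine that keeps
 * tiling state.  PFIFO cache pulling is disabled and the card drained
 * first, so no channel can observe a half-updated region.
 */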
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tilereg, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i = tilereg - dev_priv->tile.reg, j;
	struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
	unsigned long save;

	nouveau_fence_unref(&tilereg->fence);

	if (tile->pitch)
		nvfb_tile_fini(dev, i);

	if (pitch)
		nvfb_tile_init(dev, i, addr, size, pitch, flags);

	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
	nv04_fifo_cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	nvfb_tile_prog(dev, i);
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	nv04_fifo_cache_pull(dev, true);
	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

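/*
 * Attempt to claim tile region 'i'; returns NULL if the region is
 * still owned, or if a previous user's fence has not yet signalled.
 */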
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	spin_lock(&dev_priv->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&dev_priv->tile.lock);
	return tile;
}

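/*
 * Drop ownership of a tile region.  An optional fence keeps the
 * region marked pending until the GPU work that used it completes.
 */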
void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (tile) {
		spin_lock(&dev_priv->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&dev_priv->tile.lock);
	}
}

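/*
 * Claim a free tile region and program it with the requested layout,
 * tearing down stale, unused regions encountered along the way.
 */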
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < nvfb_tile_nr(dev); i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && nvfb_tile(dev, i)->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}

void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);
}

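/*
 * Set up the DMA mask, the TTM bo driver, and a VRAM memory-manager
 * region sized to the (possibly BAR-limited) amount of framebuffer
 * memory.  On pre-NV50 chips a 256KiB block is also pinned so legacy
 * VGA memory is never handed out to buffer objects.
 */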
int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits;

	dma_bits = 32;
	if (dev_priv->card_type >= NV_50) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
			dma_bits = 40;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset  > 0x40 &&
	    dev_priv->chipset != 0x45) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
			dma_bits = 39;
	}

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	dev_priv->fb_available_size = nvfb_vram_size(dev);
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->vga_ram);
		if (ret == 0)
			ret = nouveau_bo_pin(dev_priv->vga_ram,
					     TTM_PL_FLAG_VRAM);

		if (ret) {
			NV_WARN(dev, "failed to reserve VGA memory\n");
			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
		}
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}

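/*
 * Initialise the GART aperture: AGP when the DRM layer provides one,
 * otherwise the PCI(E) SGDMA path, then hand the aperture to TTM as
 * the TT memory region.
 */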
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	if (!nvdrm_gart_init(dev, &dev_priv->gart_info.aper_base,
			     &dev_priv->gart_info.aper_size))
		dev_priv->gart_info.type = NOUVEAU_GART_AGP;

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}

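/*
 * Pack a VBIOS timing-table entry into the NV40 PFB timing registers
 * (0x100220 onwards, hence the "220:" debug output).
 */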
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	/* XXX: I don't trust the -1's and +1's... they must come
	 *      from somewhere! */
	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    1 << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
		    (e->tCL + 2 - (t->tCWL - 1));

	t->reg[2] = 0x20200000 |
		    ((t->tCWL - 1) << 24 |
		     e->tUNK_12 << 16 |
		     e->tUNK_11 << 8 |
		     e->tUNK_10);

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2]);
	return 0;
}

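/*
 * NV50 equivalent; the register layout depends on the BIT 'P' table
 * version, and entries shorter than 22 bytes leave the trailing
 * unknown fields at their defaults (the switch below falls through
 * intentionally).
 */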
static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	struct bit_entry P;
	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

	if (bit_table(dev, 'P', &P))
		return -EINVAL;

	switch (min(len, (u8) 22)) {
	case 22:
		unk21 = e->tUNK_21;
	case 21:
		unk20 = e->tUNK_20;
	case 20:
		if (e->tCWL > 0)
			t->tCWL = e->tCWL;
	case 19:
		unk18 = e->tUNK_18;
		break;
	}

	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    max(unk18, (u8) 1) << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

	t->reg[2] = ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

	t->reg[8] = boot->reg[8] & 0xffffff00;

	if (P.version == 1) {
		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

		t->reg[3] = (0x14 + e->tCL) << 24 |
			    0x16 << 16 |
			    (e->tCL - 1) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= boot->reg[4] & 0xffff0000;

		t->reg[6] = (0x33 - t->tCWL) << 16 |
			    t->tCWL << 8 |
			    (0x2e + e->tCL - t->tCWL);

		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
		if (nvfb_vram_type(dev) == NV_MEM_TYPE_DDR2) {
			t->reg[5] |= (e->tCL + 3) << 8;
			t->reg[6] |= (t->tCWL - 2) << 8;
			t->reg[8] |= (e->tCL - 4);
		} else {
			t->reg[5] |= (e->tCL + 2) << 8;
			t->reg[6] |= t->tCWL << 8;
			t->reg[8] |= (e->tCL - 2);
		}
	} else {
		t->reg[1] |= (5 + e->tCL - (t->tCWL));

		/* XXX: 0xb? 0x30? */
		t->reg[3] = (0x30 + e->tCL) << 24 |
			    (boot->reg[3] & 0x00ff0000) |
			    (0xb + e->tCL) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= (unk20 << 24 | unk21 << 16);

		/* XXX: +6? */
		t->reg[5] |= (t->tCWL + 6) << 8;

		t->reg[6] = (0x5a + e->tCL) << 16 |
			    (6 - e->tCL + t->tCWL) << 8 |
			    (0x50 + e->tCL - t->tCWL);

		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
		t->reg[7] = (tmp7_3 << 24) |
			    ((tmp7_3 - 6 + e->tCL) << 16) |
			    0x202;
	}

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
	return 0;
}

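/*
 * NVC0/NVD0 equivalent (timings at 0x10f290+); unknown fields are
 * carried over from the boot-time register values rather than
 * recomputed.
 */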
static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (e->tCWL > 0)
		t->tCWL = e->tCWL;

	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
		     e->tRFC << 8 | e->tRC);

	t->reg[1] = (boot->reg[1] & 0xff000000) |
		    (e->tRCDWR & 0x0f) << 20 |
		    (e->tRCDRD & 0x0f) << 14 |
		    (t->tCWL << 7) |
		    (e->tCL & 0x0f);

	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
		    e->tWR << 16 | e->tWTR << 8;

	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
		    (e->tUNK_21 & 0xf) << 5 |
		    (e->tUNK_13 & 0x1f);

	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
		    (e->tRRD & 0x1f) << 15;

	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
	return 0;
}

/*
 * MR generation methods
 */

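/*
 * Generate DDR2 mode-register (MR0/MR1) values from a timing entry,
 * range-checking tCL and tWR against what the format can encode.
 */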
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x100f) |
		   (e->tCL) << 4 |
		   (e->tWR - 1) << 9;
	t->mr[1] = (boot->mr[1] & 0x101fbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5;

	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
	return 0;
}

uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	u8 cl = e->tCL - 4;

	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (e->tCWL < 5) {
		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
		return -ERANGE;
	}

	t->mr[0] = (boot->mr[0] & 0x180b) |
		   /* CAS */
		   (cl & 0x7) << 4 |
		   (cl & 0x8) >> 1 |
		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
	t->mr[1] = (boot->mr[1] & 0x101dbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5 |
		   (t->odt & 0x4) << 7;
	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
	return 0;
}

uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0xe0b) |
		   /* CAS */
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
		   (t->odt << 2) |
		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
	t->mr[2] = boot->mr[2];

	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
		 t->mr[0], t->mr[1], t->mr[2]);
	return 0;
}

static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x03;
	}

	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x007) |
		   ((e->tCL - 5) << 3) |
		   ((e->tWR - 4) << 8);
	t->mr[1] = (boot->mr[1] & 0x1007f0) |
		   t->drive_strength |
		   (t->odt << 2);

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
	return 0;
}

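/*
 * Top-level timing calculation: look up the timing-table entry for
 * 'freq', dispatch to the per-chipset register packer, then generate
 * mode-register values for the installed memory type.  Falls back to
 * the boot-time timings when the VBIOS has no usable entry.
 */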
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
			struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
	struct nouveau_pm_tbl_entry *e;
	u8 ver, len, *ptr, *ramcfg;
	int ret;

	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
	if (!ptr || ptr[0] == 0x00) {
		*t = *boot;
		return 0;
	}
	e = (struct nouveau_pm_tbl_entry *)ptr;

	t->tCWL = boot->tCWL;

	switch (dev_priv->card_type) {
	case NV_40:
		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_50:
		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_C0:
	case NV_D0:
		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -ENODEV;
		break;
	}

	switch (nvfb_vram_type(dev) * !ret) {
	case NV_MEM_TYPE_GDDR3:
		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR2:
		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
	if (ramcfg) {
		int dll_off;

		if (ver == 0x00)
			dll_off = !!(ramcfg[3] & 0x04);
		else
			dll_off = !!(ramcfg[2] & 0x40);

		switch (nvfb_vram_type(dev)) {
		case NV_MEM_TYPE_GDDR3:
			t->mr[1] &= ~0x00000040;
			t->mr[1] |=  0x00000040 * dll_off;
			break;
		default:
			t->mr[1] &= ~0x00000001;
			t->mr[1] |=  0x00000001 * dll_off;
			break;
		}
	}

	return ret;
}

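/*
 * Snapshot the timing and mode registers the card booted with; these
 * serve as the baseline that the calc functions above merge into.
 */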
void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 timing_base, timing_regs, mr_base;
	int i;

	if (dev_priv->card_type >= 0xC0) {
		timing_base = 0x10f290;
		mr_base = 0x10f300;
	} else {
		timing_base = 0x100220;
		mr_base = 0x1002c0;
	}

	t->id = -1;

	switch (dev_priv->card_type) {
	case NV_50:
		timing_regs = 9;
		break;
	case NV_C0:
	case NV_D0:
		timing_regs = 5;
		break;
	case NV_30:
	case NV_40:
		timing_regs = 3;
		break;
	default:
		timing_regs = 0;
		return;
	}
	for (i = 0; i < timing_regs; i++)
		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

	t->tCWL = 0;
	if (dev_priv->card_type < NV_C0) {
		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
	} else if (dev_priv->card_type <= NV_D0) {
		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
	}

	t->mr[0] = nv_rd32(dev, mr_base);
	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
	t->mr[3] = nv_rd32(dev, mr_base + 0x24);

	t->odt = 0;
	t->drive_strength = 0;

	switch (nvfb_vram_type(dev)) {
	case NV_MEM_TYPE_DDR3:
		t->odt |= (t->mr[1] & 0x200) >> 7;
	case NV_MEM_TYPE_DDR2:
		t->odt |= (t->mr[1] & 0x04) >> 2 |
			  (t->mr[1] & 0x40) >> 5;
		break;
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_GDDR5:
		t->drive_strength = t->mr[1] & 0x03;
		t->odt = (t->mr[1] & 0x0c) >> 2;
		break;
	default:
		break;
	}
}

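/*
 * Execute a memory reclock via the callbacks in 'exec'.  The sequence
 * follows the usual JEDEC-style procedure: disable the DLL first if
 * the target state needs it off, precharge all banks and enter
 * self-refresh, switch the memory clock, exit self-refresh, rewrite
 * the mode registers (keeping the DLL off until the end), load the
 * new PFB timings, and finally reset the DLL if it is to be enabled.
 * The tMRD/tCKSRE/tCKSRX/tXS/tDLLK waits are conservative fixed
 * delays per memory type.
 */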
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
	u32 mr1_dlloff;

	switch (nvfb_vram_type(dev_priv->dev)) {
	case NV_MEM_TYPE_DDR2:
		tDLLK = 2000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_DDR3:
		tDLLK = 12000;
		tCKSRE = 2000;
		tXS = 1000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_GDDR3:
		tDLLK = 40000;
		mr1_dlloff = 0x00000040;
		break;
	default:
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
		return -ENODEV;
	}

	/* fetch current MRs */
	switch (nvfb_vram_type(dev_priv->dev)) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);
	default:
		mr[1] = exec->mrg(exec, 1);
		mr[0] = exec->mrg(exec, 0);
		break;
	}

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);
	}

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);
	exec->wait(exec, tXS);

	/* update MRs */
	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);
	}

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);
	}

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);
	}

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		if (mr[1] & mr1_dlloff) {
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		}
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (nvfb_vram_type(dev_priv->dev) == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
	}

	return 0;
}

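/*
 * Determine the memory type from the VBIOS BIT 'M' table, indexed by
 * the strap-selected ramcfg value.
 */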
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
	struct bit_entry M;
	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
	if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
		u8 *table = ROMPTR(dev, M.data[3]);
		if (table && table[0] == 0x10 && ramcfg < table[3]) {
			u8 *entry = table + table[1] + (ramcfg * table[2]);
			switch (entry[0] & 0x0f) {
			case 0: return NV_MEM_TYPE_DDR2;
			case 1: return NV_MEM_TYPE_DDR3;
			case 2: return NV_MEM_TYPE_GDDR3;
			case 3: return NV_MEM_TYPE_GDDR5;
			default:
				break;
			}
		}
	}
	return NV_MEM_TYPE_UNKNOWN;
}

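/*
 * TTM memory-type manager backends: thin wrappers that route TTM's
 * VRAM and GART allocations through nouveau's own allocators.
 */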
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}

static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};