/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include <core/mm.h>
#include <subdev/vm.h>
#include <engine/fifo.h>
#include "nouveau_fence.h"

/*
 * NV10-NV40 tiling helpers
 */

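/*
 * PFB on these chips provides a small, fixed pool of tile regions, each
 * of which applies a tiling layout to a range of VRAM.  A region must be
 * reprogrammed with PFIFO idled, and may carry a fence so that it is not
 * recycled while in-flight work could still be using it.
 */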
static void
nv10_mem_update_tile_region(struct drm_device *dev,
                            struct nouveau_tile_reg *tilereg, uint32_t addr,
                            uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int i = tilereg - dev_priv->tile.reg, j;
        struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
        unsigned long save;

        nouveau_fence_unref(&tilereg->fence);

        if (tile->pitch)
                nvfb_tile_fini(dev, i);

        if (pitch)
                nvfb_tile_init(dev, i, addr, size, pitch, flags);

        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
        nv04_fifo_cache_pull(dev, false);

        nouveau_wait_for_idle(dev);

        nvfb_tile_prog(dev, i);
        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
                        dev_priv->eng[j]->set_tile_region(dev, i);
        }

        nv04_fifo_cache_pull(dev, true);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        spin_lock(&dev_priv->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_done(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&dev_priv->tile.lock);
        return tile;
}

void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
                         struct nouveau_fence *fence)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (tile) {
                spin_lock(&dev_priv->tile.lock);
                if (fence) {
                        /* Mark it as pending. */
                        tile->fence = fence;
                        nouveau_fence_ref(fence);
                }

                tile->used = false;
                spin_unlock(&dev_priv->tile.lock);
        }
}

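/*
 * Scan all tile regions: claim the first free one for the new surface,
 * and opportunistically tear down any other free region that still has a
 * stale pitch programmed.
 */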
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
                    uint32_t pitch, uint32_t flags)
{
        struct nouveau_tile_reg *tile, *found = NULL;
        int i;

        for (i = 0; i < nvfb_tile_nr(dev); i++) {
                tile = nv10_mem_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && nvfb_tile(dev, i)->pitch) {
                        /* Kill an unused tile region. */
                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_mem_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_mem_update_tile_region(dev, found, addr, size,
                                            pitch, flags);
        return found;
}

/*
 * Cleanup everything
 */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        ttm_bo_device_release(&dev_priv->ttm.bdev);

        nouveau_ttm_global_release(dev_priv);

        if (dev_priv->fb_mtrr >= 0) {
                drm_mtrr_del(dev_priv->fb_mtrr,
                             pci_resource_start(dev->pdev, 1),
                             pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
                dev_priv->fb_mtrr = -1;
        }
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
        nouveau_sgdma_takedown(dev);
}

int
nouveau_mem_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret, dma_bits;

        /* NV50+ can use 40-bit DMA addresses, everything else gets 32-bit */
        dma_bits = 32;
        if (dev_priv->card_type >= NV_50) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
                        dma_bits = 40;
        } else
        if (0 && pci_is_pcie(dev->pdev) && /* 39-bit path disabled for now */
            dev_priv->chipset  > 0x40 &&
            dev_priv->chipset != 0x45) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
                        dma_bits = 39;
        }

        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret)
                return ret;
        ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret) {
                /* Reset to default value. */
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
        }

        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
                                 dev_priv->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
                                 dma_bits <= 32);
        if (ret) {
                NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
                return ret;
        }

        dev_priv->fb_available_size = nvfb_vram_size(dev);
        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
        if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
                dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_aper_free = dev_priv->fb_available_size;

        /* mappable vram */
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             dev_priv->fb_available_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, NULL, &dev_priv->vga_ram);
                if (ret == 0)
                        ret = nouveau_bo_pin(dev_priv->vga_ram,
                                             TTM_PL_FLAG_VRAM);

                if (ret) {
                        NV_WARN(dev, "failed to reserve VGA memory\n");
                        nouveau_bo_ref(NULL, &dev_priv->vga_ram);
                }
        }

        dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
                                         pci_resource_len(dev->pdev, 1),
                                         DRM_MTRR_WC);
        return 0;
}

int
nouveau_mem_gart_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret;

        if (!nvdrm_gart_init(dev, &dev_priv->gart_info.aper_base,
                                  &dev_priv->gart_info.aper_size))
                dev_priv->gart_info.type = NOUVEAU_GART_AGP;

        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
                ret = nouveau_sgdma_init(dev);
                if (ret) {
                        NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
                        return ret;
                }
        }

        NV_INFO(dev, "%d MiB GART (aperture)\n",
                (int)(dev_priv->gart_info.aper_size >> 20));
        dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

        ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
                             dev_priv->gart_info.aper_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
                return ret;
        }

        return 0;
}

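/*
 * Memory timing calculation
 *
 * Each *_mem_timing_calc() routine below packs the BIOS timing-table
 * entry 'e' into that generation's PFB timing register layout, leaning
 * on the boot-time register values where fields are not yet understood.
 */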
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        /* XXX: I don't trust the -1's and +1's... they must come
         *      from somewhere! */
        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    1 << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
                    (e->tCL + 2 - (t->tCWL - 1));

        t->reg[2] = 0x20200000 |
                    ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2]);
        return 0;
}

static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct bit_entry P;
        uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

        if (bit_table(dev, 'P', &P))
                return -EINVAL;

        /* longer table entries provide more fields; cascade intended */
        switch (min(len, (u8) 22)) {
        case 22:
                unk21 = e->tUNK_21;
                /* fall through */
        case 21:
                unk20 = e->tUNK_20;
                /* fall through */
        case 20:
                if (e->tCWL > 0)
                        t->tCWL = e->tCWL;
                /* fall through */
        case 19:
                unk18 = e->tUNK_18;
                break;
        }

        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    max(unk18, (u8) 1) << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

        t->reg[2] = ((t->tCWL - 1) << 24 |
                    e->tRRD << 16 |
                    e->tRCDWR << 8 |
                    e->tRCDRD);

        t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

        t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

        t->reg[8] = boot->reg[8] & 0xffffff00;

        if (P.version == 1) {
                t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

                t->reg[3] = (0x14 + e->tCL) << 24 |
                            0x16 << 16 |
                            (e->tCL - 1) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= boot->reg[4] & 0xffff0000;

                t->reg[6] = (0x33 - t->tCWL) << 16 |
                            t->tCWL << 8 |
                            (0x2e + e->tCL - t->tCWL);

                t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

                /* XXX: P.version == 1 only has DDR2 and GDDR3? */
                if (nvfb_vram_type(dev) == NV_MEM_TYPE_DDR2) {
                        t->reg[5] |= (e->tCL + 3) << 8;
                        t->reg[6] |= (t->tCWL - 2) << 8;
                        t->reg[8] |= (e->tCL - 4);
                } else {
                        t->reg[5] |= (e->tCL + 2) << 8;
                        t->reg[6] |= t->tCWL << 8;
                        t->reg[8] |= (e->tCL - 2);
                }
        } else {
                t->reg[1] |= (5 + e->tCL - (t->tCWL));

                /* XXX: 0xb? 0x30? */
                t->reg[3] = (0x30 + e->tCL) << 24 |
                            (boot->reg[3] & 0x00ff0000) |
                            (0xb + e->tCL) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= (unk20 << 24 | unk21 << 16);

                /* XXX: +6? */
                t->reg[5] |= (t->tCWL + 6) << 8;

                t->reg[6] = (0x5a + e->tCL) << 16 |
                            (6 - e->tCL + t->tCWL) << 8 |
                            (0x50 + e->tCL - t->tCWL);

                tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
                t->reg[7] = (tmp7_3 << 24) |
                            ((tmp7_3 - 6 + e->tCL) << 16) |
                            0x202;
        }

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
                 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
        NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
        return 0;
}

static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (e->tCWL > 0)
                t->tCWL = e->tCWL;

        t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
                     e->tRFC << 8 | e->tRC);

        t->reg[1] = (boot->reg[1] & 0xff000000) |
                    (e->tRCDWR & 0x0f) << 20 |
                    (e->tRCDRD & 0x0f) << 14 |
                    (t->tCWL << 7) |
                    (e->tCL & 0x0f);

        t->reg[2] = (boot->reg[2] & 0xff0000ff) |
                    e->tWR << 16 | e->tWTR << 8;

        t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
                    (e->tUNK_21 & 0xf) << 5 |
                    (e->tUNK_13 & 0x1f);

        t->reg[4] = (boot->reg[4] & 0xfff00fff) |
                    (e->tRRD & 0x1f) << 15;

        NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
        return 0;
}

/*
 * MR generation methods
 */

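/*
 * The DRAM mode registers (MRs) encode CAS latency, write recovery, ODT
 * and drive strength.  Each routine below rebuilds the MR values for its
 * memory type from the BIOS timing entry, preserving the boot values of
 * any bits that are not understood.
 */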
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        t->drive_strength = 0;
        if (len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x100f) |
                   (e->tCL) << 4 |
                   (e->tWR - 1) << 9;
        t->mr[1] = (boot->mr[1] & 0x101fbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5;

        NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
        return 0;
}

uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
        0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        u8 cl = e->tCL - 4;

        t->drive_strength = 0;
        if (len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (e->tCWL < 5) {
                NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
                return -ERANGE;
        }

        t->mr[0] = (boot->mr[0] & 0x180b) |
                   /* CAS */
                   (cl & 0x7) << 4 |
                   (cl & 0x8) >> 1 |
                   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
        t->mr[1] = (boot->mr[1] & 0x101dbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5 |
                   (t->odt & 0x4) << 7;
        t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
        return 0;
}

uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
        0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
        0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0xe0b) |
                   /* CAS */
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
        t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
                   (t->odt << 2) |
                   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
        t->mr[2] = boot->mr[2];

        NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
                      t->mr[0], t->mr[1], t->mr[2]);
        return 0;
}

static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x03;
        }

        if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x007) |
                   ((e->tCL - 5) << 3) |
                   ((e->tWR - 4) << 8);
        t->mr[1] = (boot->mr[1] & 0x1007f0) |
                   t->drive_strength |
                   (t->odt << 2);

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
        return 0;
}

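/*
 * Look up the BIOS timing entry for 'freq', compute the PFB timing
 * registers for this chipset, then derive matching mode register values
 * for the memory type actually fitted.  Falls back to the boot timing
 * when the BIOS has no entry for the requested frequency.
 */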
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
                        struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_memtiming *boot = &pm->boot.timing;
        struct nouveau_pm_tbl_entry *e;
        u8 ver, len, *ptr, *ramcfg;
        int ret;

        ptr = nouveau_perf_timing(dev, freq, &ver, &len);
        if (!ptr || ptr[0] == 0x00) {
                *t = *boot;
                return 0;
        }
        e = (struct nouveau_pm_tbl_entry *)ptr;

        t->tCWL = boot->tCWL;

        switch (dev_priv->card_type) {
        case NV_40:
                ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_50:
                ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_C0:
        case NV_D0:
                ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -ENODEV;
                break;
        }

        /* multiplying by !ret forces the default case (and thus -EINVAL)
         * when the timing calculation above has already failed */
        switch (nvfb_vram_type(dev) * !ret) {
        case NV_MEM_TYPE_GDDR3:
                ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_GDDR5:
                ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR2:
                ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR3:
                ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
        if (ramcfg) {
                int dll_off;

                if (ver == 0x00)
                        dll_off = !!(ramcfg[3] & 0x04);
                else
                        dll_off = !!(ramcfg[2] & 0x40);

                switch (nvfb_vram_type(dev)) {
                case NV_MEM_TYPE_GDDR3:
                        t->mr[1] &= ~0x00000040;
                        t->mr[1] |=  0x00000040 * dll_off;
                        break;
                default:
                        t->mr[1] &= ~0x00000001;
                        t->mr[1] |=  0x00000001 * dll_off;
                        break;
                }
        }

        return ret;
}

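/*
 * Snapshot the timing and mode registers as programmed by the VBIOS at
 * boot, so later reclocks can carry over fields we do not calculate.
 */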
void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        u32 timing_base, timing_regs, mr_base;
        int i;

        if (dev_priv->card_type >= NV_C0) {
                timing_base = 0x10f290;
                mr_base = 0x10f300;
        } else {
                timing_base = 0x100220;
                mr_base = 0x1002c0;
        }

        t->id = -1;

        switch (dev_priv->card_type) {
        case NV_50:
                timing_regs = 9;
                break;
        case NV_C0:
        case NV_D0:
                timing_regs = 5;
                break;
        case NV_30:
        case NV_40:
                timing_regs = 3;
                break;
        default:
                return;
        }
        for (i = 0; i < timing_regs; i++)
                t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

        t->tCWL = 0;
        if (dev_priv->card_type < NV_C0) {
                t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
        } else if (dev_priv->card_type <= NV_D0) {
                t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
        }

        t->mr[0] = nv_rd32(dev, mr_base);
        t->mr[1] = nv_rd32(dev, mr_base + 0x04);
        t->mr[2] = nv_rd32(dev, mr_base + 0x20);
        t->mr[3] = nv_rd32(dev, mr_base + 0x24);

        t->odt = 0;
        t->drive_strength = 0;

        switch (nvfb_vram_type(dev)) {
        case NV_MEM_TYPE_DDR3:
                t->odt |= (t->mr[1] & 0x200) >> 7;
                /* fall through */
        case NV_MEM_TYPE_DDR2:
                t->odt |= (t->mr[1] & 0x04) >> 2 |
                          (t->mr[1] & 0x40) >> 5;
                break;
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_GDDR5:
                t->drive_strength = t->mr[1] & 0x03;
                t->odt = (t->mr[1] & 0x0c) >> 2;
                break;
        default:
                break;
        }
}

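/*
 * Common reclocking sequence: park the DRAM in self-refresh, switch the
 * memory clock, then bring it back out, rewriting the mode registers and
 * PFB timings and resetting the DLL where one is in use.
 */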
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
                 struct nouveau_pm_level *perflvl)
{
        struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
        struct nouveau_pm_memtiming *info = &perflvl->timing;
        u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
        u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
        u32 mr1_dlloff;

        switch (nvfb_vram_type(dev_priv->dev)) {
        case NV_MEM_TYPE_DDR2:
                tDLLK = 2000;
                mr1_dlloff = 0x00000001;
                break;
        case NV_MEM_TYPE_DDR3:
                tDLLK = 12000;
                tCKSRE = 2000;
                tXS = 1000;
                mr1_dlloff = 0x00000001;
                break;
        case NV_MEM_TYPE_GDDR3:
                tDLLK = 40000;
                mr1_dlloff = 0x00000040;
                break;
        default:
                NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
                return -ENODEV;
        }

        /* fetch current MRs */
        switch (nvfb_vram_type(dev_priv->dev)) {
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_DDR3:
                mr[2] = exec->mrg(exec, 2);
                /* fall through */
        default:
                mr[1] = exec->mrg(exec, 1);
                mr[0] = exec->mrg(exec, 0);
                break;
        }

        /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
        if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
                exec->precharge(exec);
                exec->mrs (exec, 1, mr[1] | mr1_dlloff);
                exec->wait(exec, tMRD);
        }

        /* enter self-refresh mode */
        exec->precharge(exec);
        exec->refresh(exec);
        exec->refresh(exec);
        exec->refresh_auto(exec, false);
        exec->refresh_self(exec, true);
        exec->wait(exec, tCKSRE);

        /* modify input clock frequency */
        exec->clock_set(exec);

        /* exit self-refresh mode */
        exec->wait(exec, tCKSRX);
        exec->precharge(exec);
        exec->refresh_self(exec, false);
        exec->refresh_auto(exec, true);
        exec->wait(exec, tXS);
        exec->wait(exec, tXS);

        /* update MRs */
        if (mr[2] != info->mr[2]) {
                exec->mrs (exec, 2, info->mr[2]);
                exec->wait(exec, tMRD);
        }

        if (mr[1] != info->mr[1]) {
                /* need to keep DLL off until later, at least on GDDR3 */
                exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
                exec->wait(exec, tMRD);
        }

        if (mr[0] != info->mr[0]) {
                exec->mrs (exec, 0, info->mr[0]);
                exec->wait(exec, tMRD);
        }

        /* update PFB timing registers */
        exec->timing_set(exec);

        /* DLL (enable + ) reset */
        if (!(info->mr[1] & mr1_dlloff)) {
                if (mr[1] & mr1_dlloff) {
                        exec->mrs (exec, 1, info->mr[1]);
                        exec->wait(exec, tMRD);
                }
                exec->mrs (exec, 0, info->mr[0] | 0x00000100);
                exec->wait(exec, tMRD);
                exec->mrs (exec, 0, info->mr[0] | 0x00000000);
                exec->wait(exec, tMRD);
                exec->wait(exec, tDLLK);
                if (nvfb_vram_type(dev_priv->dev) == NV_MEM_TYPE_GDDR3)
                        exec->precharge(exec);
        }

        return 0;
}

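/*
 * Determine the installed memory type from the VBIOS: the RAMCFG strap
 * (0x101000) indexes an entry in the memory table referenced by the
 * BIT 'M' table.
 */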
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
        struct bit_entry M;
        u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
        if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
                u8 *table = ROMPTR(dev, M.data[3]);
                if (table && table[0] == 0x10 && ramcfg < table[3]) {
                        u8 *entry = table + table[1] + (ramcfg * table[2]);
                        switch (entry[0] & 0x0f) {
                        case 0: return NV_MEM_TYPE_DDR2;
                        case 1: return NV_MEM_TYPE_DDR3;
                        case 2: return NV_MEM_TYPE_GDDR3;
                        case 3: return NV_MEM_TYPE_GDDR5;
                        default:
                                break;
                        }
                }
        }
        return NV_MEM_TYPE_UNKNOWN;
}

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        /* nothing to do */
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        /* nothing to do */
        return 0;
}

static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct drm_device *dev = dev_priv->dev;

        nouveau_mem_node_cleanup(mem->mm_node);
        nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
}

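/*
 * Allocate VRAM for a buffer object through the fb subdev allocator;
 * noncontig buffers may be assembled from chunks as small as one
 * (possibly large) page.
 */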
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
                            mem->page_alignment << PAGE_SHIFT, size_nc,
                            (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_mm *mm = man->priv;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&mm->mutex);

        printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s  block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};

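/*
 * The GART manager only tracks a node per buffer; mem->start stays zero
 * because the GPU virtual address is assigned later, when the pages are
 * bound into the aperture.
 */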
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_mem *node;

        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
                     dev_priv->gart_info.aper_size))
                return -ENOMEM;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        node->page_shift = 12;

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};