/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"

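/* Initialise the channel's software DMA state.  On NV50 and later the
 * push buffer is split in half: the first half holds command data, the
 * second half holds the indirect buffer (IB), a ring of 8-byte entries
 * written by nv50_dma_push() below.  Older cards fetch command data
 * from the push buffer directly; there, dma.max is set two words short
 * of the buffer size.
 */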
void
nouveau_dma_init(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_bo *pushbuf = chan->pushbuf_bo;

        if (dev_priv->card_type >= NV_50) {
                const int ib_size = pushbuf->bo.mem.size / 2;

                chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
                chan->dma.ib_max = (ib_size / 8) - 1;
                chan->dma.ib_put = 0;
                chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;

                chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
        } else {
                chan->dma.max  = (pushbuf->bo.mem.size >> 2) - 2;
        }

        chan->dma.put  = 0;
        chan->dma.cur  = chan->dma.put;
        chan->dma.free = chan->dma.max - chan->dma.cur;
}

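/* Copy nr_dwords of command data into the push buffer at the current
 * write position.  The kmap of the push buffer may be an I/O mapping
 * (e.g. of VRAM), so an iomem-safe copy is used in that case.
 */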
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
        mem = &mem[chan->dma.cur];
        if (is_iomem)
                memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
        else
                memcpy(mem, data, nr_dwords * 4);
        chan->dma.cur += nr_dwords;
}

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
        uint64_t val;

        val = nvchan_rd32(chan, chan->user_get);
        if (chan->user_get_hi)
                val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;

        /* reset the counter as long as GET is still advancing; this
         * avoids misdetecting a GPU lockup if the GPU happens to just
         * be processing an operation that takes a long time
         */
        if (val != *prev_get) {
                *prev_get = val;
                *timeout = 0;
        }

        if ((++*timeout & 0xff) == 0) {
                DRM_UDELAY(1);
                if (*timeout > 100000)
                        return -EBUSY;
        }

        if (val < chan->pushbuf_base ||
            val > chan->pushbuf_base + (chan->dma.max << 2))
                return -EINVAL;

        return (val - chan->pushbuf_base) >> 2;
}

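/* Submit one indirect buffer entry: 'length' bytes of commands at
 * offset 'delta' inside 'bo'.  The entry is written into the IB ring
 * and the GPU is notified by bumping the IB put register in the
 * channel's user area (0x8c).
 */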
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
              int delta, int length)
{
        struct nouveau_bo *pb = chan->pushbuf_bo;
        struct nouveau_vma *vma;
        int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
        u64 offset;

        vma = nouveau_bo_vma_find(bo, chan->vm);
        BUG_ON(!vma);
        offset = vma->offset + delta;

        BUG_ON(chan->dma.ib_free < 1);
        nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
        nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

        chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

        DRM_MEMORYBARRIER();
        /* Flush writes. */
        nouveau_bo_rd32(pb, 0);

        nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
        chan->dma.ib_free--;
}

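/* Spin until at least 'count' IB entries are free, tracking the GPU's
 * IB get pointer (0x88 in the channel's user area).  Returns -EBUSY
 * if GET stops advancing for too long.
 */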
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
        uint32_t cnt = 0, prev_get = 0;

        while (chan->dma.ib_free < count) {
                uint32_t get = nvchan_rd32(chan, 0x88);
                if (get != prev_get) {
                        prev_get = get;
                        cnt = 0;
                }

                if ((++cnt & 0xff) == 0) {
                        DRM_UDELAY(1);
                        if (cnt > 100000)
                                return -EBUSY;
                }

                chan->dma.ib_free = get - chan->dma.ib_put;
                if (chan->dma.ib_free <= 0)
                        chan->dma.ib_free += chan->dma.ib_max;
        }

        return 0;
}

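/* Wait for 'slots' free IB entries (plus one spare) and 'count' free
 * dwords of push buffer space, wrapping the write cursor back to the
 * start of the data area when the tail is too small.
 */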
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
        uint64_t prev_get = 0;
        int ret, cnt = 0;

        ret = nv50_dma_push_wait(chan, slots + 1);
        if (unlikely(ret))
                return ret;

        while (chan->dma.free < count) {
                int get = READ_GET(chan, &prev_get, &cnt);
                if (unlikely(get < 0)) {
                        if (get == -EINVAL)
                                continue;

                        return get;
                }

                if (get <= chan->dma.cur) {
                        chan->dma.free = chan->dma.max - chan->dma.cur;
                        if (chan->dma.free >= count)
                                break;

                        FIRE_RING(chan);
                        do {
                                get = READ_GET(chan, &prev_get, &cnt);
                                if (unlikely(get < 0)) {
                                        if (get == -EINVAL)
                                                continue;
                                        return get;
                                }
                        } while (get == 0);
                        chan->dma.cur = 0;
                        chan->dma.put = 0;
                }

                chan->dma.free = get - chan->dma.cur - 1;
        }

        return 0;
}

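/* Ensure at least 'size' dwords of push buffer space (and, on
 * channels with an indirect buffer, 'slots' IB entries) are free
 * before the caller starts emitting commands.
 */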
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
        uint64_t prev_get = 0;
        int cnt = 0, get;

        if (chan->dma.ib_max)
                return nv50_dma_wait(chan, slots, size);

        while (chan->dma.free < size) {
                get = READ_GET(chan, &prev_get, &cnt);
                if (unlikely(get == -EBUSY))
                        return -EBUSY;

                /* loop until we have a usable GET pointer.  the value
                 * we read from the GPU may be outside the main ring if
                 * PFIFO is processing a buffer called from the main
                 * ring; discard these values until something sensible
                 * is seen.
                 *
                 * the other case where we discard GET is while the GPU
                 * is fetching from the SKIPS area, so the code below
                 * doesn't have to deal with some fun corner cases.
                 */
                if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
                        continue;

                if (get <= chan->dma.cur) {
                        /* engine is fetching behind us, or is completely
                         * idle (GET == PUT), so we have free space up
                         * until the end of the push buffer
                         *
                         * we can only hit that path once per call; due
                         * to looping back to the beginning of the push
                         * buffer, we'll hit the fetching-ahead-of-us
                         * path from that point on.
                         *
                         * the *one* exception to that rule is if we read
                         * GET==PUT, in which case the below conditional
                         * will always succeed and break us out of the
                         * wait loop.
                         */
                        chan->dma.free = chan->dma.max - chan->dma.cur;
                        if (chan->dma.free >= size)
                                break;

                        /* not enough space left at the end of the push
                         * buffer, instruct the GPU to jump back to the
                         * start right after processing the currently
                         * pending commands.
                         */
                        OUT_RING(chan, chan->pushbuf_base | 0x20000000);

                        /* wait for GET to depart from the skips area.
                         * prevents writing GET==PUT and causing a race
                         * condition that causes us to think the GPU is
                         * idle when it's not.
                         */
                        do {
                                get = READ_GET(chan, &prev_get, &cnt);
                                if (unlikely(get == -EBUSY))
                                        return -EBUSY;
                                if (unlikely(get == -EINVAL))
                                        continue;
                        } while (get <= NOUVEAU_DMA_SKIPS);
                        WRITE_PUT(NOUVEAU_DMA_SKIPS);

                        /* we're now submitting commands at the start of
                         * the push buffer.
                         */
                        chan->dma.cur  =
                        chan->dma.put  = NOUVEAU_DMA_SKIPS;
                }

                /* engine is fetching ahead of us, so we have space up
                 * until the current GET pointer.  the "- 1" is to
                 * ensure there's space left to emit a jump back to the
                 * beginning of the push buffer if we require it.  we
                 * can never get GET == PUT here, so this is safe.
                 */
                chan->dma.free = get - chan->dma.cur - 1;
        }

        return 0;
}