1 /**************************************************************************
2  *
3  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_reg.h"
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
32
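/*
 * Order (log2 of the number of buckets) of the hash table used to map
 * resources and buffer objects to their validation entries while the
 * command stream is parsed; see the drm_ht_find_item()/drm_ht_insert_item()
 * calls below.
 */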
33 #define VMW_RES_HT_ORDER 12
34
35 /**
36  * struct vmw_resource_relocation - Relocation info for resources
37  *
38  * @head: List head for the software context's relocation list.
39  * @res: Non-ref-counted pointer to the resource.
40  * @offset: Offset, in units of 4-byte entries, into the command buffer
41  * where the id that needs fixup is located.
42  */
43 struct vmw_resource_relocation {
44         struct list_head head;
45         const struct vmw_resource *res;
46         unsigned long offset;
47 };
48
49 /**
50  * struct vmw_resource_val_node - Validation info for resources
51  *
52  * @head: List head for the software context's resource list.
53  * @hash: Hash entry for quick resource to val_node lookup.
54  * @res: Ref-counted pointer to the resource.
55  * @new_backup: Refcounted pointer to the new backup buffer, if the backup
56  * buffer is to be switched on unreserve, otherwise NULL.
57  * @staged_bindings: If @res is a context, tracks bindings set up during
58  * the command batch. Otherwise NULL.
59  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60  * @first_usage: Set to true the first time the resource is referenced in
61  * the command stream.
62  * @no_buffer_needed: Resources do not need to allocate buffer backup on
63  * reservation. The command stream will provide one.
64  */
65 struct vmw_resource_val_node {
66         struct list_head head;
67         struct drm_hash_item hash;
68         struct vmw_resource *res;
69         struct vmw_dma_buffer *new_backup;
70         struct vmw_ctx_binding_state *staged_bindings;
71         unsigned long new_backup_offset;
72         bool first_usage;
73         bool no_buffer_needed;
74 };
75
76 /**
77  * struct vmw_cmd_entry - Describe a command for the verifier
78  *
79  * @user_allow: Whether allowed from the execbuf ioctl.
80  * @gb_disable: Whether disabled if guest-backed objects are available.
81  * @gb_enable: Whether enabled iff guest-backed objects are available.
82  */
83 struct vmw_cmd_entry {
84         int (*func) (struct vmw_private *, struct vmw_sw_context *,
85                      SVGA3dCmdHeader *);
86         bool user_allow;
87         bool gb_disable;
88         bool gb_enable;
89 };
90
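/*
 * Builds one entry of the command verifier table as a designated
 * initializer, indexed by the SVGA3D command id relative to
 * SVGA_3D_CMD_BASE.
 */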
91 #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
92         [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93                                        (_gb_disable), (_gb_enable)}
94
95 /**
96  * vmw_resource_list_unreserve - Unreserve resources previously reserved for
97  * command submission.
98  *
99  * @list: List of resources to unreserve.
100  * @backoff: Whether command submission failed.
101  */
102 static void vmw_resource_list_unreserve(struct list_head *list,
103                                         bool backoff)
104 {
105         struct vmw_resource_val_node *val;
106
107         list_for_each_entry(val, list, head) {
108                 struct vmw_resource *res = val->res;
109                 struct vmw_dma_buffer *new_backup =
110                         backoff ? NULL : val->new_backup;
111
112                 /*
113                  * Transfer staged context bindings to the persistent context
114                  * binding tracker, or discard them if the submission failed.
115                  */
116                 if (unlikely(val->staged_bindings)) {
117                         if (!backoff) {
118                                 vmw_context_binding_state_transfer
119                                         (val->res, val->staged_bindings);
120                         }
121                         kfree(val->staged_bindings);
122                         val->staged_bindings = NULL;
123                 }
124                 vmw_resource_unreserve(res, new_backup,
125                         val->new_backup_offset);
126                 vmw_dmabuf_unreference(&val->new_backup);
127         }
128 }
129
130
131 /**
132  * vmw_resource_val_add - Add a resource to the software context's
133  * resource list if it's not already on it.
134  *
135  * @sw_context: Pointer to the software context.
136  * @res: Pointer to the resource.
137  * @p_node: If non-NULL on entry, points to a valid pointer to a
138  * struct vmw_resource_val_node on successful return.
139  */
140 static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141                                 struct vmw_resource *res,
142                                 struct vmw_resource_val_node **p_node)
143 {
144         struct vmw_resource_val_node *node;
145         struct drm_hash_item *hash;
146         int ret;
147
148         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149                                     &hash) == 0)) {
150                 node = container_of(hash, struct vmw_resource_val_node, hash);
151                 node->first_usage = false;
152                 if (unlikely(p_node != NULL))
153                         *p_node = node;
154                 return 0;
155         }
156
157         node = kzalloc(sizeof(*node), GFP_KERNEL);
158         if (unlikely(node == NULL)) {
159                 DRM_ERROR("Failed to allocate a resource validation "
160                           "entry.\n");
161                 return -ENOMEM;
162         }
163
164         node->hash.key = (unsigned long) res;
165         ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166         if (unlikely(ret != 0)) {
167                 DRM_ERROR("Failed to initialize a resource validation "
168                           "entry.\n");
169                 kfree(node);
170                 return ret;
171         }
172         list_add_tail(&node->head, &sw_context->resource_list);
173         node->res = vmw_resource_reference(res);
174         node->first_usage = true;
175
176         if (unlikely(p_node != NULL))
177                 *p_node = node;
178
179         return 0;
180 }
181
182 /**
183  * vmw_resource_relocation_add - Add a relocation to the relocation list
184  *
185  * @list: Pointer to head of relocation list.
186  * @res: The resource.
187  * @offset: Offset into the command buffer currently being parsed where the
188  * id that needs fixup is located. Granularity is 4 bytes.
189  */
190 static int vmw_resource_relocation_add(struct list_head *list,
191                                        const struct vmw_resource *res,
192                                        unsigned long offset)
193 {
194         struct vmw_resource_relocation *rel;
195
196         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
197         if (unlikely(rel == NULL)) {
198                 DRM_ERROR("Failed to allocate a resource relocation.\n");
199                 return -ENOMEM;
200         }
201
202         rel->res = res;
203         rel->offset = offset;
204         list_add_tail(&rel->head, list);
205
206         return 0;
207 }
208
209 /**
210  * vmw_resource_relocations_free - Free all relocations on a list
211  *
212  * @list: Pointer to the head of the relocation list.
213  */
214 static void vmw_resource_relocations_free(struct list_head *list)
215 {
216         struct vmw_resource_relocation *rel, *n;
217
218         list_for_each_entry_safe(rel, n, list, head) {
219                 list_del(&rel->head);
220                 kfree(rel);
221         }
222 }
223
224 /**
225  * vmw_resource_relocations_apply - Apply all relocations on a list
226  *
227  * @cb: Pointer to the start of the command buffer being patched. This need
228  * not be the same buffer as the one being parsed when the relocation
229  * list was built, but the contents must be the same modulo the
230  * resource ids.
231  * @list: Pointer to the head of the relocation list.
232  */
233 static void vmw_resource_relocations_apply(uint32_t *cb,
234                                            struct list_head *list)
235 {
236         struct vmw_resource_relocation *rel;
237
238         list_for_each_entry(rel, list, head)
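        /* Relocation offsets are in 32-bit words, so index the buffer directly. */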
239                 cb[rel->offset] = rel->res->id;
240 }
241
242 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
243                            struct vmw_sw_context *sw_context,
244                            SVGA3dCmdHeader *header)
245 {
246         return capable(CAP_SYS_ADMIN) ? : -EINVAL;
247 }
248
249 static int vmw_cmd_ok(struct vmw_private *dev_priv,
250                       struct vmw_sw_context *sw_context,
251                       SVGA3dCmdHeader *header)
252 {
253         return 0;
254 }
255
256 /**
257  * vmw_bo_to_validate_list - add a bo to a validate list
258  *
259  * @sw_context: The software context used for this command submission batch.
260  * @bo: The buffer object to add.
261  * @validate_as_mob: Validate this buffer as a MOB.
262  * @p_val_node: If non-NULL, will be updated with the validate node number
263  * on return.
264  *
265  * Returns -EINVAL if the limit of number of buffer objects per command
266  * submission is reached.
267  */
268 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
269                                    struct ttm_buffer_object *bo,
270                                    bool validate_as_mob,
271                                    uint32_t *p_val_node)
272 {
273         uint32_t val_node;
274         struct vmw_validate_buffer *vval_buf;
275         struct ttm_validate_buffer *val_buf;
276         struct drm_hash_item *hash;
277         int ret;
278
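        /*
         * Buffer objects share the resource hash table for de-duplication:
         * each unique buffer gets one slot in val_bufs and one entry on the
         * validate_nodes list handed to TTM.
         */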
279         if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
280                                     &hash) == 0)) {
281                 vval_buf = container_of(hash, struct vmw_validate_buffer,
282                                         hash);
283                 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
284                         DRM_ERROR("Inconsistent buffer usage.\n");
285                         return -EINVAL;
286                 }
287                 val_buf = &vval_buf->base;
288                 val_node = vval_buf - sw_context->val_bufs;
289         } else {
290                 val_node = sw_context->cur_val_buf;
291                 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
292                         DRM_ERROR("Max number of DMA buffers per submission "
293                                   "exceeded.\n");
294                         return -EINVAL;
295                 }
296                 vval_buf = &sw_context->val_bufs[val_node];
297                 vval_buf->hash.key = (unsigned long) bo;
298                 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
299                 if (unlikely(ret != 0)) {
300                         DRM_ERROR("Failed to initialize a buffer validation "
301                                   "entry.\n");
302                         return ret;
303                 }
304                 ++sw_context->cur_val_buf;
305                 val_buf = &vval_buf->base;
306                 val_buf->bo = ttm_bo_reference(bo);
307                 val_buf->reserved = false;
308                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
309                 vval_buf->validate_as_mob = validate_as_mob;
310         }
311
312         sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
313
314         if (p_val_node)
315                 *p_val_node = val_node;
316
317         return 0;
318 }
319
320 /**
321  * vmw_resources_reserve - Reserve all resources on the sw_context's
322  * resource list.
323  *
324  * @sw_context: Pointer to the software context.
325  *
326  * Note that since VMware's command submission is currently protected by
327  * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
328  * since only a single thread at a time will attempt this.
329  */
330 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
331 {
332         struct vmw_resource_val_node *val;
333         int ret;
334
335         list_for_each_entry(val, &sw_context->resource_list, head) {
336                 struct vmw_resource *res = val->res;
337
338                 ret = vmw_resource_reserve(res, val->no_buffer_needed);
339                 if (unlikely(ret != 0))
340                         return ret;
341
342                 if (res->backup) {
343                         struct ttm_buffer_object *bo = &res->backup->base;
344
345                         ret = vmw_bo_to_validate_list
346                                 (sw_context, bo,
347                                  vmw_resource_needs_backup(res), NULL);
348
349                         if (unlikely(ret != 0))
350                                 return ret;
351                 }
352         }
353         return 0;
354 }
355
356 /**
357  * vmw_resources_validate - Validate all resources on the sw_context's
358  * resource list.
359  *
360  * @sw_context: Pointer to the software context.
361  *
362  * Before this function is called, all resource backup buffers must have
363  * been validated.
364  */
365 static int vmw_resources_validate(struct vmw_sw_context *sw_context)
366 {
367         struct vmw_resource_val_node *val;
368         int ret;
369
370         list_for_each_entry(val, &sw_context->resource_list, head) {
371                 struct vmw_resource *res = val->res;
372
373                 ret = vmw_resource_validate(res);
374                 if (unlikely(ret != 0)) {
375                         if (ret != -ERESTARTSYS)
376                                 DRM_ERROR("Failed to validate resource.\n");
377                         return ret;
378                 }
379         }
380         return 0;
381 }
382
383 /**
384  * vmw_cmd_res_check - Check that a resource is present and if so, put it
385  * on the resource validate list unless it's already there.
386  *
387  * @dev_priv: Pointer to a device private structure.
388  * @sw_context: Pointer to the software context.
389  * @res_type: Resource type.
390  * @converter: User-space visible type-specific information.
391  * @id: Pointer to the location in the command buffer currently being
392  * parsed, where the user-space resource id handle is located.
393  */
394 static int vmw_cmd_res_check(struct vmw_private *dev_priv,
395                              struct vmw_sw_context *sw_context,
396                              enum vmw_res_type res_type,
397                              const struct vmw_user_resource_conv *converter,
398                              uint32_t *id,
399                              struct vmw_resource_val_node **p_val)
400 {
401         struct vmw_res_cache_entry *rcache =
402                 &sw_context->res_cache[res_type];
403         struct vmw_resource *res;
404         struct vmw_resource_val_node *node;
405         int ret;
406
407         if (*id == SVGA3D_INVALID_ID) {
408                 if (p_val)
409                         *p_val = NULL;
410                 if (res_type == vmw_res_context) {
411                         DRM_ERROR("Illegal context id.\n");
412                         return -EINVAL;
413                 }
414                 return 0;
415         }
416
417         /*
418          * Fastpath in case of repeated commands referencing the same
419          * resource
420          */
421
422         if (likely(rcache->valid && *id == rcache->handle)) {
423                 const struct vmw_resource *res = rcache->res;
424
425                 rcache->node->first_usage = false;
426                 if (p_val)
427                         *p_val = rcache->node;
428
429                 return vmw_resource_relocation_add
430                         (&sw_context->res_relocations, res,
431                          id - sw_context->buf_start);
432         }
433
434         ret = vmw_user_resource_lookup_handle(dev_priv,
435                                               sw_context->tfile,
436                                               *id,
437                                               converter,
438                                               &res);
439         if (unlikely(ret != 0)) {
440                 DRM_ERROR("Could not find or use resource 0x%08x.\n",
441                           (unsigned) *id);
442                 dump_stack();
443                 return ret;
444         }
445
446         rcache->valid = true;
447         rcache->res = res;
448         rcache->handle = *id;
449
450         ret = vmw_resource_relocation_add(&sw_context->res_relocations,
451                                           res,
452                                           id - sw_context->buf_start);
453         if (unlikely(ret != 0))
454                 goto out_no_reloc;
455
456         ret = vmw_resource_val_add(sw_context, res, &node);
457         if (unlikely(ret != 0))
458                 goto out_no_reloc;
459
460         rcache->node = node;
461         if (p_val)
462                 *p_val = node;
463
464         if (node->first_usage && res_type == vmw_res_context) {
465                 node->staged_bindings =
466                         kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
467                 if (node->staged_bindings == NULL) {
468                         DRM_ERROR("Failed to allocate context binding "
469                                   "information.\n");
470                         ret = -ENOMEM;
                            goto out_no_reloc;
471                 }
472                 INIT_LIST_HEAD(&node->staged_bindings->list);
473         }
474
475         vmw_resource_unreference(&res);
476         return 0;
477
478 out_no_reloc:
479         BUG_ON(sw_context->error_resource != NULL);
480         sw_context->error_resource = res;
481
482         return ret;
483 }
484
485 /**
486  * vmw_cmd_cid_check - Check a command header for valid context information.
487  *
488  * @dev_priv: Pointer to a device private structure.
489  * @sw_context: Pointer to the software context.
490  * @header: A command header with an embedded user-space context handle.
491  *
492  * Convenience function: Call vmw_cmd_res_check with the user-space context
493  * handle embedded in @header.
494  */
495 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
496                              struct vmw_sw_context *sw_context,
497                              SVGA3dCmdHeader *header)
498 {
499         struct vmw_cid_cmd {
500                 SVGA3dCmdHeader header;
501                 __le32 cid;
502         } *cmd;
503
504         cmd = container_of(header, struct vmw_cid_cmd, header);
505         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
506                                  user_context_converter, &cmd->cid, NULL);
507 }
508
509 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
510                                            struct vmw_sw_context *sw_context,
511                                            SVGA3dCmdHeader *header)
512 {
513         struct vmw_sid_cmd {
514                 SVGA3dCmdHeader header;
515                 SVGA3dCmdSetRenderTarget body;
516         } *cmd;
517         struct vmw_resource_val_node *ctx_node;
518         struct vmw_resource_val_node *res_node;
519         int ret;
520
521         cmd = container_of(header, struct vmw_sid_cmd, header);
522
523         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
524                                 user_context_converter, &cmd->body.cid,
525                                 &ctx_node);
526         if (unlikely(ret != 0))
527                 return ret;
528
529         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
530                                 user_surface_converter,
531                                 &cmd->body.target.sid, &res_node);
532         if (unlikely(ret != 0))
533                 return ret;
534
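        /*
         * On guest-backed devices, record the render-target binding in the
         * context's staged binding state; it is committed to the persistent
         * tracker only if the whole command batch succeeds.
         */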
535         if (dev_priv->has_mob) {
536                 struct vmw_ctx_bindinfo bi;
537
538                 bi.ctx = ctx_node->res;
539                 bi.res = res_node ? res_node->res : NULL;
540                 bi.bt = vmw_ctx_binding_rt;
541                 bi.i1.rt_type = cmd->body.type;
542                 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
543         }
544
545         return 0;
546 }
547
548 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
549                                       struct vmw_sw_context *sw_context,
550                                       SVGA3dCmdHeader *header)
551 {
552         struct vmw_sid_cmd {
553                 SVGA3dCmdHeader header;
554                 SVGA3dCmdSurfaceCopy body;
555         } *cmd;
556         int ret;
557
558         cmd = container_of(header, struct vmw_sid_cmd, header);
559         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
560                                 user_surface_converter,
561                                 &cmd->body.src.sid, NULL);
562         if (unlikely(ret != 0))
563                 return ret;
564         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
565                                  user_surface_converter,
566                                  &cmd->body.dest.sid, NULL);
567 }
568
569 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
570                                      struct vmw_sw_context *sw_context,
571                                      SVGA3dCmdHeader *header)
572 {
573         struct vmw_sid_cmd {
574                 SVGA3dCmdHeader header;
575                 SVGA3dCmdSurfaceStretchBlt body;
576         } *cmd;
577         int ret;
578
579         cmd = container_of(header, struct vmw_sid_cmd, header);
580         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
581                                 user_surface_converter,
582                                 &cmd->body.src.sid, NULL);
583         if (unlikely(ret != 0))
584                 return ret;
585         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
586                                  user_surface_converter,
587                                  &cmd->body.dest.sid, NULL);
588 }
589
590 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
591                                          struct vmw_sw_context *sw_context,
592                                          SVGA3dCmdHeader *header)
593 {
594         struct vmw_sid_cmd {
595                 SVGA3dCmdHeader header;
596                 SVGA3dCmdBlitSurfaceToScreen body;
597         } *cmd;
598
599         cmd = container_of(header, struct vmw_sid_cmd, header);
600
601         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
602                                  user_surface_converter,
603                                  &cmd->body.srcImage.sid, NULL);
604 }
605
606 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
607                                  struct vmw_sw_context *sw_context,
608                                  SVGA3dCmdHeader *header)
609 {
610         struct vmw_sid_cmd {
611                 SVGA3dCmdHeader header;
612                 SVGA3dCmdPresent body;
613         } *cmd;
614
615
616         cmd = container_of(header, struct vmw_sid_cmd, header);
617
618         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
619                                  user_surface_converter, &cmd->body.sid,
620                                  NULL);
621 }
622
623 /**
624  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
625  *
626  * @dev_priv: The device private structure.
627  * @new_query_bo: The new buffer holding query results.
628  * @sw_context: The software context used for this command submission.
629  *
630  * This function checks whether @new_query_bo is suitable for holding
631  * query results, and if another buffer currently is pinned for query
632  * results. If so, the function prepares the state of @sw_context for
633  * switching pinned buffers after successful submission of the current
634  * command batch.
635  */
636 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
637                                        struct ttm_buffer_object *new_query_bo,
638                                        struct vmw_sw_context *sw_context)
639 {
640         struct vmw_res_cache_entry *ctx_entry =
641                 &sw_context->res_cache[vmw_res_context];
642         int ret;
643
644         BUG_ON(!ctx_entry->valid);
645         sw_context->last_query_ctx = ctx_entry->res;
646
647         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
648
649                 if (unlikely(new_query_bo->num_pages > 4)) {
650                         DRM_ERROR("Query buffer too large.\n");
651                         return -EINVAL;
652                 }
653
654                 if (unlikely(sw_context->cur_query_bo != NULL)) {
655                         sw_context->needs_post_query_barrier = true;
656                         ret = vmw_bo_to_validate_list(sw_context,
657                                                       sw_context->cur_query_bo,
658                                                       dev_priv->has_mob, NULL);
659                         if (unlikely(ret != 0))
660                                 return ret;
661                 }
662                 sw_context->cur_query_bo = new_query_bo;
663
664                 ret = vmw_bo_to_validate_list(sw_context,
665                                               dev_priv->dummy_query_bo,
666                                               dev_priv->has_mob, NULL);
667                 if (unlikely(ret != 0))
668                         return ret;
669
670         }
671
672         return 0;
673 }
674
675
676 /**
677  * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
678  *
679  * @dev_priv: The device private structure.
680  * @sw_context: The software context used for this command submission batch.
681  *
682  * This function will check if we're switching query buffers, and will then,
683  * issue a dummy occlusion query wait used as a query barrier. When the fence
684  * object following that query wait has signaled, we are sure that all
685  * preceding queries have finished, and the old query buffer can be unpinned.
686  * However, since both the new query buffer and the old one are fenced with
687  * that fence, we can do an asynchronous unpin now, and be sure that the
688  * old query buffer won't be moved until the fence has signaled.
689  *
690  * As mentioned above, both the new and the old query buffers need to be fenced
691  * using a sequence emitted *after* calling this function.
692  */
693 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
694                                      struct vmw_sw_context *sw_context)
695 {
696         /*
697          * The validate list should still hold references to all
698          * contexts here.
699          */
700
701         if (sw_context->needs_post_query_barrier) {
702                 struct vmw_res_cache_entry *ctx_entry =
703                         &sw_context->res_cache[vmw_res_context];
704                 struct vmw_resource *ctx;
705                 int ret;
706
707                 BUG_ON(!ctx_entry->valid);
708                 ctx = ctx_entry->res;
709
710                 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
711
712                 if (unlikely(ret != 0))
713                         DRM_ERROR("Out of fifo space for dummy query.\n");
714         }
715
716         if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
717                 if (dev_priv->pinned_bo) {
718                         vmw_bo_pin(dev_priv->pinned_bo, false);
719                         ttm_bo_unref(&dev_priv->pinned_bo);
720                 }
721
722                 if (!sw_context->needs_post_query_barrier) {
723                         vmw_bo_pin(sw_context->cur_query_bo, true);
724
725                         /*
726                          * We pin also the dummy_query_bo buffer so that we
727                          * don't need to validate it when emitting
728                          * dummy queries in context destroy paths.
729                          */
730
731                         vmw_bo_pin(dev_priv->dummy_query_bo, true);
732                         dev_priv->dummy_query_bo_pinned = true;
733
734                         BUG_ON(sw_context->last_query_ctx == NULL);
735                         dev_priv->query_cid = sw_context->last_query_ctx->id;
736                         dev_priv->query_cid_valid = true;
737                         dev_priv->pinned_bo =
738                                 ttm_bo_reference(sw_context->cur_query_bo);
739                 }
740         }
741 }
742
743 /**
744  * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
745  * handle to a MOB id.
746  *
747  * @dev_priv: Pointer to a device private structure.
748  * @sw_context: The software context used for this command batch validation.
749  * @id: Pointer to the user-space handle to be translated.
750  * @vmw_bo_p: Points to a location that, on successful return will carry
751  * a reference-counted pointer to the DMA buffer identified by the
752  * user-space handle in @id.
753  *
754  * This function saves information needed to translate a user-space buffer
755  * handle to a MOB id. The translation does not take place immediately, but
756  * during a call to vmw_apply_relocations(). This function builds a relocation
757  * list and a list of buffers to validate. The former needs to be freed using
758  * either vmw_apply_relocations() or vmw_free_relocations(). The latter
759  * needs to be freed using vmw_clear_validations.
760  */
761 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
762                                  struct vmw_sw_context *sw_context,
763                                  SVGAMobId *id,
764                                  struct vmw_dma_buffer **vmw_bo_p)
765 {
766         struct vmw_dma_buffer *vmw_bo = NULL;
767         struct ttm_buffer_object *bo;
768         uint32_t handle = *id;
769         struct vmw_relocation *reloc;
770         int ret;
771
772         ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
773         if (unlikely(ret != 0)) {
774                 DRM_ERROR("Could not find or use MOB buffer.\n");
775                 return -EINVAL;
776         }
777         bo = &vmw_bo->base;
778
779         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
780                 DRM_ERROR("Max number of relocations per submission"
781                           " exceeded.\n");
782                 ret = -EINVAL;
783                 goto out_no_reloc;
784         }
785
786         reloc = &sw_context->relocs[sw_context->cur_reloc++];
787         reloc->mob_loc = id;
788         reloc->location = NULL;
789
790         ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
791         if (unlikely(ret != 0))
792                 goto out_no_reloc;
793
794         *vmw_bo_p = vmw_bo;
795         return 0;
796
797 out_no_reloc:
798         vmw_dmabuf_unreference(&vmw_bo);
799         *vmw_bo_p = NULL;
800         return ret;
801 }
802
803 /**
804  * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
805  * handle to a valid SVGAGuestPtr
806  *
807  * @dev_priv: Pointer to a device private structure.
808  * @sw_context: The software context used for this command batch validation.
809  * @ptr: Pointer to the user-space handle to be translated.
810  * @vmw_bo_p: Points to a location that, on successful return will carry
811  * a reference-counted pointer to the DMA buffer identified by the
812  * user-space handle in @ptr.
813  *
814  * This function saves information needed to translate a user-space buffer
815  * handle to a valid SVGAGuestPtr. The translation does not take place
816  * immediately, but during a call to vmw_apply_relocations().
817  * This function builds a relocation list and a list of buffers to validate.
818  * The former needs to be freed using either vmw_apply_relocations() or
819  * vmw_free_relocations(). The latter needs to be freed using
820  * vmw_clear_validations.
821  */
822 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
823                                    struct vmw_sw_context *sw_context,
824                                    SVGAGuestPtr *ptr,
825                                    struct vmw_dma_buffer **vmw_bo_p)
826 {
827         struct vmw_dma_buffer *vmw_bo = NULL;
828         struct ttm_buffer_object *bo;
829         uint32_t handle = ptr->gmrId;
830         struct vmw_relocation *reloc;
831         int ret;
832
833         ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
834         if (unlikely(ret != 0)) {
835                 DRM_ERROR("Could not find or use GMR region.\n");
836                 return -EINVAL;
837         }
838         bo = &vmw_bo->base;
839
840         if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
841                 DRM_ERROR("Max number of relocations per submission"
842                           " exceeded.\n");
843                 ret = -EINVAL;
844                 goto out_no_reloc;
845         }
846
847         reloc = &sw_context->relocs[sw_context->cur_reloc++];
848         reloc->location = ptr;
849
850         ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
851         if (unlikely(ret != 0))
852                 goto out_no_reloc;
853
854         *vmw_bo_p = vmw_bo;
855         return 0;
856
857 out_no_reloc:
858         vmw_dmabuf_unreference(&vmw_bo);
859         *vmw_bo_p = NULL;
860         return ret;
861 }
862
863 /**
864  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
865  *
866  * @dev_priv: Pointer to a device private struct.
867  * @sw_context: The software context used for this command submission.
868  * @header: Pointer to the command header in the command stream.
869  */
870 static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
871                                   struct vmw_sw_context *sw_context,
872                                   SVGA3dCmdHeader *header)
873 {
874         struct vmw_begin_gb_query_cmd {
875                 SVGA3dCmdHeader header;
876                 SVGA3dCmdBeginGBQuery q;
877         } *cmd;
878
879         cmd = container_of(header, struct vmw_begin_gb_query_cmd,
880                            header);
881
882         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
883                                  user_context_converter, &cmd->q.cid,
884                                  NULL);
885 }
886
887 /**
888  * vmw_cmd_begin_query - validate a  SVGA_3D_CMD_BEGIN_QUERY command.
889  *
890  * @dev_priv: Pointer to a device private struct.
891  * @sw_context: The software context used for this command submission.
892  * @header: Pointer to the command header in the command stream.
893  */
894 static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
895                                struct vmw_sw_context *sw_context,
896                                SVGA3dCmdHeader *header)
897 {
898         struct vmw_begin_query_cmd {
899                 SVGA3dCmdHeader header;
900                 SVGA3dCmdBeginQuery q;
901         } *cmd;
902
903         cmd = container_of(header, struct vmw_begin_query_cmd,
904                            header);
905
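        /*
         * On guest-backed devices, rewrite the legacy query command in place
         * into its guest-backed equivalent (the two layouts are the same
         * size, asserted below) and validate the rewritten command instead.
         */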
906         if (unlikely(dev_priv->has_mob)) {
907                 struct {
908                         SVGA3dCmdHeader header;
909                         SVGA3dCmdBeginGBQuery q;
910                 } gb_cmd;
911
912                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
913
914                 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
915                 gb_cmd.header.size = cmd->header.size;
916                 gb_cmd.q.cid = cmd->q.cid;
917                 gb_cmd.q.type = cmd->q.type;
918
919                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
920                 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
921         }
922
923         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
924                                  user_context_converter, &cmd->q.cid,
925                                  NULL);
926 }
927
928 /**
929  * vmw_cmd_end_gb_query - validate a  SVGA_3D_CMD_END_GB_QUERY command.
930  *
931  * @dev_priv: Pointer to a device private struct.
932  * @sw_context: The software context used for this command submission.
933  * @header: Pointer to the command header in the command stream.
934  */
935 static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
936                                 struct vmw_sw_context *sw_context,
937                                 SVGA3dCmdHeader *header)
938 {
939         struct vmw_dma_buffer *vmw_bo;
940         struct vmw_query_cmd {
941                 SVGA3dCmdHeader header;
942                 SVGA3dCmdEndGBQuery q;
943         } *cmd;
944         int ret;
945
946         cmd = container_of(header, struct vmw_query_cmd, header);
947         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
948         if (unlikely(ret != 0))
949                 return ret;
950
951         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
952                                     &cmd->q.mobid,
953                                     &vmw_bo);
954         if (unlikely(ret != 0))
955                 return ret;
956
957         ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
958
959         vmw_dmabuf_unreference(&vmw_bo);
960         return ret;
961 }
962
963 /**
964  * vmw_cmd_end_query - validate a  SVGA_3D_CMD_END_QUERY command.
965  *
966  * @dev_priv: Pointer to a device private struct.
967  * @sw_context: The software context used for this command submission.
968  * @header: Pointer to the command header in the command stream.
969  */
970 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
971                              struct vmw_sw_context *sw_context,
972                              SVGA3dCmdHeader *header)
973 {
974         struct vmw_dma_buffer *vmw_bo;
975         struct vmw_query_cmd {
976                 SVGA3dCmdHeader header;
977                 SVGA3dCmdEndQuery q;
978         } *cmd;
979         int ret;
980
981         cmd = container_of(header, struct vmw_query_cmd, header);
982         if (dev_priv->has_mob) {
983                 struct {
984                         SVGA3dCmdHeader header;
985                         SVGA3dCmdEndGBQuery q;
986                 } gb_cmd;
987
988                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
989
990                 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
991                 gb_cmd.header.size = cmd->header.size;
992                 gb_cmd.q.cid = cmd->q.cid;
993                 gb_cmd.q.type = cmd->q.type;
994                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
995                 gb_cmd.q.offset = cmd->q.guestResult.offset;
996
997                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
998                 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
999         }
1000
1001         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1002         if (unlikely(ret != 0))
1003                 return ret;
1004
1005         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1006                                       &cmd->q.guestResult,
1007                                       &vmw_bo);
1008         if (unlikely(ret != 0))
1009                 return ret;
1010
1011         ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1012
1013         vmw_dmabuf_unreference(&vmw_bo);
1014         return ret;
1015 }
1016
1017 /**
1018  * vmw_cmd_wait_gb_query - validate a  SVGA_3D_CMD_WAIT_GB_QUERY command.
1019  *
1020  * @dev_priv: Pointer to a device private struct.
1021  * @sw_context: The software context used for this command submission.
1022  * @header: Pointer to the command header in the command stream.
1023  */
1024 static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1025                                  struct vmw_sw_context *sw_context,
1026                                  SVGA3dCmdHeader *header)
1027 {
1028         struct vmw_dma_buffer *vmw_bo;
1029         struct vmw_query_cmd {
1030                 SVGA3dCmdHeader header;
1031                 SVGA3dCmdWaitForGBQuery q;
1032         } *cmd;
1033         int ret;
1034
1035         cmd = container_of(header, struct vmw_query_cmd, header);
1036         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1037         if (unlikely(ret != 0))
1038                 return ret;
1039
1040         ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1041                                     &cmd->q.mobid,
1042                                     &vmw_bo);
1043         if (unlikely(ret != 0))
1044                 return ret;
1045
1046         vmw_dmabuf_unreference(&vmw_bo);
1047         return 0;
1048 }
1049
1050 /**
1051  * vmw_cmd_wait_query - validate a  SVGA_3D_CMD_WAIT_QUERY command.
1052  *
1053  * @dev_priv: Pointer to a device private struct.
1054  * @sw_context: The software context used for this command submission.
1055  * @header: Pointer to the command header in the command stream.
1056  */
1057 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1058                               struct vmw_sw_context *sw_context,
1059                               SVGA3dCmdHeader *header)
1060 {
1061         struct vmw_dma_buffer *vmw_bo;
1062         struct vmw_query_cmd {
1063                 SVGA3dCmdHeader header;
1064                 SVGA3dCmdWaitForQuery q;
1065         } *cmd;
1066         int ret;
1067
1068         cmd = container_of(header, struct vmw_query_cmd, header);
1069         if (dev_priv->has_mob) {
1070                 struct {
1071                         SVGA3dCmdHeader header;
1072                         SVGA3dCmdWaitForGBQuery q;
1073                 } gb_cmd;
1074
1075                 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1076
1077                 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1078                 gb_cmd.header.size = cmd->header.size;
1079                 gb_cmd.q.cid = cmd->q.cid;
1080                 gb_cmd.q.type = cmd->q.type;
1081                 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1082                 gb_cmd.q.offset = cmd->q.guestResult.offset;
1083
1084                 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1085                 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1086         }
1087
1088         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1089         if (unlikely(ret != 0))
1090                 return ret;
1091
1092         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1093                                       &cmd->q.guestResult,
1094                                       &vmw_bo);
1095         if (unlikely(ret != 0))
1096                 return ret;
1097
1098         vmw_dmabuf_unreference(&vmw_bo);
1099         return 0;
1100 }
1101
1102 static int vmw_cmd_dma(struct vmw_private *dev_priv,
1103                        struct vmw_sw_context *sw_context,
1104                        SVGA3dCmdHeader *header)
1105 {
1106         struct vmw_dma_buffer *vmw_bo = NULL;
1107         struct vmw_surface *srf = NULL;
1108         struct vmw_dma_cmd {
1109                 SVGA3dCmdHeader header;
1110                 SVGA3dCmdSurfaceDMA dma;
1111         } *cmd;
1112         int ret;
1113
1114         cmd = container_of(header, struct vmw_dma_cmd, header);
1115         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1116                                       &cmd->dma.guest.ptr,
1117                                       &vmw_bo);
1118         if (unlikely(ret != 0))
1119                 return ret;
1120
1121         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1122                                 user_surface_converter, &cmd->dma.host.sid,
1123                                 NULL);
1124         if (unlikely(ret != 0)) {
1125                 if (unlikely(ret != -ERESTARTSYS))
1126                         DRM_ERROR("could not find surface for DMA.\n");
1127                 goto out_no_surface;
1128         }
1129
1130         srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1131
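        /*
         * Give the KMS code a chance to snoop the DMA contents in case the
         * destination surface currently backs a cursor.
         */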
1132         vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
1133
1134 out_no_surface:
1135         vmw_dmabuf_unreference(&vmw_bo);
1136         return ret;
1137 }
1138
1139 static int vmw_cmd_draw(struct vmw_private *dev_priv,
1140                         struct vmw_sw_context *sw_context,
1141                         SVGA3dCmdHeader *header)
1142 {
1143         struct vmw_draw_cmd {
1144                 SVGA3dCmdHeader header;
1145                 SVGA3dCmdDrawPrimitives body;
1146         } *cmd;
1147         SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1148                 (unsigned long)header + sizeof(*cmd));
1149         SVGA3dPrimitiveRange *range;
1150         uint32_t i;
1151         uint32_t maxnum;
1152         int ret;
1153
1154         ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1155         if (unlikely(ret != 0))
1156                 return ret;
1157
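        /*
         * The draw command is followed by a variable number of vertex
         * declarations and then primitive ranges; bound both counts by the
         * size given in the command header before walking them.
         */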
1158         cmd = container_of(header, struct vmw_draw_cmd, header);
1159         maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1160
1161         if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1162                 DRM_ERROR("Illegal number of vertex declarations.\n");
1163                 return -EINVAL;
1164         }
1165
1166         for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1167                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1168                                         user_surface_converter,
1169                                         &decl->array.surfaceId, NULL);
1170                 if (unlikely(ret != 0))
1171                         return ret;
1172         }
1173
1174         maxnum = (header->size - sizeof(cmd->body) -
1175                   cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1176         if (unlikely(cmd->body.numRanges > maxnum)) {
1177                 DRM_ERROR("Illegal number of index ranges.\n");
1178                 return -EINVAL;
1179         }
1180
1181         range = (SVGA3dPrimitiveRange *) decl;
1182         for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1183                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1184                                         user_surface_converter,
1185                                         &range->indexArray.surfaceId, NULL);
1186                 if (unlikely(ret != 0))
1187                         return ret;
1188         }
1189         return 0;
1190 }
1191
1192
1193 static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1194                              struct vmw_sw_context *sw_context,
1195                              SVGA3dCmdHeader *header)
1196 {
1197         struct vmw_tex_state_cmd {
1198                 SVGA3dCmdHeader header;
1199                 SVGA3dCmdSetTextureState state;
1200         } *cmd;
1201
1202         SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1203           ((unsigned long) header + header->size + sizeof(header));
1204         SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1205                 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1206         struct vmw_resource_val_node *ctx_node;
1207         struct vmw_resource_val_node *res_node;
1208         int ret;
1209
1210         cmd = container_of(header, struct vmw_tex_state_cmd,
1211                            header);
1212
1213         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1214                                 user_context_converter, &cmd->state.cid,
1215                                 &ctx_node);
1216         if (unlikely(ret != 0))
1217                 return ret;
1218
1219         for (; cur_state < last_state; ++cur_state) {
1220                 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1221                         continue;
1222
1223                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1224                                         user_surface_converter,
1225                                         &cur_state->value, &res_node);
1226                 if (unlikely(ret != 0))
1227                         return ret;
1228
1229                 if (dev_priv->has_mob) {
1230                         struct vmw_ctx_bindinfo bi;
1231
1232                         bi.ctx = ctx_node->res;
1233                         bi.res = res_node ? res_node->res : NULL;
1234                         bi.bt = vmw_ctx_binding_tex;
1235                         bi.i1.texture_stage = cur_state->stage;
1236                         vmw_context_binding_add(ctx_node->staged_bindings,
1237                                                 &bi);
1238                 }
1239         }
1240
1241         return 0;
1242 }
1243
1244 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1245                                       struct vmw_sw_context *sw_context,
1246                                       void *buf)
1247 {
1248         struct vmw_dma_buffer *vmw_bo;
1249         int ret;
1250
1251         struct {
1252                 uint32_t header;
1253                 SVGAFifoCmdDefineGMRFB body;
1254         } *cmd = buf;
1255
1256         ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1257                                       &cmd->body.ptr,
1258                                       &vmw_bo);
1259         if (unlikely(ret != 0))
1260                 return ret;
1261
1262         vmw_dmabuf_unreference(&vmw_bo);
1263
1264         return ret;
1265 }
1266
1267 /**
1268  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1269  *
1270  * @dev_priv: Pointer to a device private struct.
1271  * @sw_context: The software context being used for this batch.
1272  * @res_type: The resource type.
1273  * @converter: Information about user-space binding for this resource type.
1274  * @res_id: Pointer to the user-space resource handle in the command stream.
1275  * @buf_id: Pointer to the user-space backup buffer handle in the command
1276  * stream.
1277  * @backup_offset: Offset of backup into MOB.
1278  *
1279  * This function prepares for registering a switch of backup buffers
1280  * in the resource metadata just prior to unreserving.
1281  */
1282 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1283                                  struct vmw_sw_context *sw_context,
1284                                  enum vmw_res_type res_type,
1285                                  const struct vmw_user_resource_conv
1286                                  *converter,
1287                                  uint32_t *res_id,
1288                                  uint32_t *buf_id,
1289                                  unsigned long backup_offset)
1290 {
1291         int ret;
1292         struct vmw_dma_buffer *dma_buf;
1293         struct vmw_resource_val_node *val_node;
1294
1295         ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1296                                 converter, res_id, &val_node);
1297         if (unlikely(ret != 0))
1298                 return ret;
1299
1300         ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1301         if (unlikely(ret != 0))
1302                 return ret;
1303
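        /*
         * If this is the first reference to the resource in the command
         * stream, the backup buffer supplied here means no kernel-allocated
         * backup is needed at reservation time.
         */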
1304         if (val_node->first_usage)
1305                 val_node->no_buffer_needed = true;
1306
1307         vmw_dmabuf_unreference(&val_node->new_backup);
1308         val_node->new_backup = dma_buf;
1309         val_node->new_backup_offset = backup_offset;
1310
1311         return 0;
1312 }
1313
1314 /**
1315  * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1316  * command
1317  *
1318  * @dev_priv: Pointer to a device private struct.
1319  * @sw_context: The software context being used for this batch.
1320  * @header: Pointer to the command header in the command stream.
1321  */
1322 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1323                                    struct vmw_sw_context *sw_context,
1324                                    SVGA3dCmdHeader *header)
1325 {
1326         struct vmw_bind_gb_surface_cmd {
1327                 SVGA3dCmdHeader header;
1328                 SVGA3dCmdBindGBSurface body;
1329         } *cmd;
1330
1331         cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1332
1333         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1334                                      user_surface_converter,
1335                                      &cmd->body.sid, &cmd->body.mobid,
1336                                      0);
1337 }
1338
1339 /**
1340  * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1341  * command
1342  *
1343  * @dev_priv: Pointer to a device private struct.
1344  * @sw_context: The software context being used for this batch.
1345  * @header: Pointer to the command header in the command stream.
1346  */
1347 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1348                                    struct vmw_sw_context *sw_context,
1349                                    SVGA3dCmdHeader *header)
1350 {
1351         struct vmw_gb_surface_cmd {
1352                 SVGA3dCmdHeader header;
1353                 SVGA3dCmdUpdateGBImage body;
1354         } *cmd;
1355
1356         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1357
1358         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1359                                  user_surface_converter,
1360                                  &cmd->body.image.sid, NULL);
1361 }
1362
1363 /**
1364  * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1365  * command
1366  *
1367  * @dev_priv: Pointer to a device private struct.
1368  * @sw_context: The software context being used for this batch.
1369  * @header: Pointer to the command header in the command stream.
1370  */
1371 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1372                                      struct vmw_sw_context *sw_context,
1373                                      SVGA3dCmdHeader *header)
1374 {
1375         struct vmw_gb_surface_cmd {
1376                 SVGA3dCmdHeader header;
1377                 SVGA3dCmdUpdateGBSurface body;
1378         } *cmd;
1379
1380         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1381
1382         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1383                                  user_surface_converter,
1384                                  &cmd->body.sid, NULL);
1385 }
1386
1387 /**
1388  * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1389  * command
1390  *
1391  * @dev_priv: Pointer to a device private struct.
1392  * @sw_context: The software context being used for this batch.
1393  * @header: Pointer to the command header in the command stream.
1394  */
1395 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1396                                      struct vmw_sw_context *sw_context,
1397                                      SVGA3dCmdHeader *header)
1398 {
1399         struct vmw_gb_surface_cmd {
1400                 SVGA3dCmdHeader header;
1401                 SVGA3dCmdReadbackGBImage body;
1402         } *cmd;
1403
1404         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1405
1406         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1407                                  user_surface_converter,
1408                                  &cmd->body.image.sid, NULL);
1409 }
1410
1411 /**
1412  * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1413  * command
1414  *
1415  * @dev_priv: Pointer to a device private struct.
1416  * @sw_context: The software context being used for this batch.
1417  * @header: Pointer to the command header in the command stream.
1418  */
1419 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1420                                        struct vmw_sw_context *sw_context,
1421                                        SVGA3dCmdHeader *header)
1422 {
1423         struct vmw_gb_surface_cmd {
1424                 SVGA3dCmdHeader header;
1425                 SVGA3dCmdReadbackGBSurface body;
1426         } *cmd;
1427
1428         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1429
1430         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1431                                  user_surface_converter,
1432                                  &cmd->body.sid, NULL);
1433 }
1434
1435 /**
1436  * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1437  * command
1438  *
1439  * @dev_priv: Pointer to a device private struct.
1440  * @sw_context: The software context being used for this batch.
1441  * @header: Pointer to the command header in the command stream.
1442  */
1443 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1444                                        struct vmw_sw_context *sw_context,
1445                                        SVGA3dCmdHeader *header)
1446 {
1447         struct vmw_gb_surface_cmd {
1448                 SVGA3dCmdHeader header;
1449                 SVGA3dCmdInvalidateGBImage body;
1450         } *cmd;
1451
1452         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1453
1454         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1455                                  user_surface_converter,
1456                                  &cmd->body.image.sid, NULL);
1457 }
1458
1459 /**
1460  * vmw_cmd_invalidate_gb_surface - Validate an
1461  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1462  *
1463  * @dev_priv: Pointer to a device private struct.
1464  * @sw_context: The software context being used for this batch.
1465  * @header: Pointer to the command header in the command stream.
1466  */
1467 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1468                                          struct vmw_sw_context *sw_context,
1469                                          SVGA3dCmdHeader *header)
1470 {
1471         struct vmw_gb_surface_cmd {
1472                 SVGA3dCmdHeader header;
1473                 SVGA3dCmdInvalidateGBSurface body;
1474         } *cmd;
1475
1476         cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1477
1478         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1479                                  user_surface_converter,
1480                                  &cmd->body.sid, NULL);
1481 }
1482
1483 /**
1484  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1485  * command
1486  *
1487  * @dev_priv: Pointer to a device private struct.
1488  * @sw_context: The software context being used for this batch.
1489  * @header: Pointer to the command header in the command stream.
1490  */
1491 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1492                               struct vmw_sw_context *sw_context,
1493                               SVGA3dCmdHeader *header)
1494 {
1495         struct vmw_set_shader_cmd {
1496                 SVGA3dCmdHeader header;
1497                 SVGA3dCmdSetShader body;
1498         } *cmd;
1499         struct vmw_resource_val_node *ctx_node;
1500         int ret;
1501
1502         cmd = container_of(header, struct vmw_set_shader_cmd,
1503                            header);
1504
1505         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1506                                 user_context_converter, &cmd->body.cid,
1507                                 &ctx_node);
1508         if (unlikely(ret != 0))
1509                 return ret;
1510
1511         if (dev_priv->has_mob) {
1512                 struct vmw_ctx_bindinfo bi;
1513                 struct vmw_resource_val_node *res_node;
1514
1515                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
1516                                         user_shader_converter,
1517                                         &cmd->body.shid, &res_node);
1518                 if (unlikely(ret != 0))
1519                         return ret;
1520
1521                 bi.ctx = ctx_node->res;
1522                 bi.res = res_node ? res_node->res : NULL;
1523                 bi.bt = vmw_ctx_binding_shader;
1524                 bi.i1.shader_type = cmd->body.type;
1525                 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1526         }
1527
1528         return 0;
1529 }
1530
1531 /**
1532  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1533  * command
1534  *
1535  * @dev_priv: Pointer to a device private struct.
1536  * @sw_context: The software context being used for this batch.
1537  * @header: Pointer to the command header in the command stream.
1538  */
1539 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1540                                   struct vmw_sw_context *sw_context,
1541                                   SVGA3dCmdHeader *header)
1542 {
1543         struct vmw_bind_gb_shader_cmd {
1544                 SVGA3dCmdHeader header;
1545                 SVGA3dCmdBindGBShader body;
1546         } *cmd;
1547
1548         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1549                            header);
1550
1551         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1552                                      user_shader_converter,
1553                                      &cmd->body.shid, &cmd->body.mobid,
1554                                      cmd->body.offsetInBytes);
1555 }
1556
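/**
 * vmw_cmd_check_not_3d - Verify a non-3D (2D fifo) command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the command stream. Out: size of this
 * command in bytes.
 *
 * Only a small set of 2D commands is accepted, and only from kernel
 * clients. SVGA_CMD_DEFINE_GMRFB additionally has its framebuffer
 * pointer checked by vmw_cmd_check_define_gmrfb().
 */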
1557 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1558                                 struct vmw_sw_context *sw_context,
1559                                 void *buf, uint32_t *size)
1560 {
1561         uint32_t size_remaining = *size;
1562         uint32_t cmd_id;
1563
1564         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1565         switch (cmd_id) {
1566         case SVGA_CMD_UPDATE:
1567                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1568                 break;
1569         case SVGA_CMD_DEFINE_GMRFB:
1570                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1571                 break;
1572         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1573                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1574                 break;
1575         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1576                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1577                 break;
1578         default:
1579                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1580                 return -EINVAL;
1581         }
1582
1583         if (*size > size_remaining) {
1584                 DRM_ERROR("Invalid SVGA command (size mismatch):"
1585                           " %u.\n", cmd_id);
1586                 return -EINVAL;
1587         }
1588
1589         if (unlikely(!sw_context->kernel)) {
1590                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1591                 return -EPERM;
1592         }
1593
1594         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1595                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1596
1597         return 0;
1598 }
1599
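/*
 * Per-command verifier table, indexed by (command id - SVGA_3D_CMD_BASE).
 * Besides the verifier function, each VMW_CMD_DEF entry carries the
 * user_allow, gb_disable and gb_enable flags consumed by vmw_cmd_check().
 */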
1600 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1601         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1602                     false, false, false),
1603         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1604                     false, false, false),
1605         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1606                     true, false, false),
1607         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1608                     true, false, false),
1609         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1610                     true, false, false),
1611         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1612                     false, false, false),
1613         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1614                     false, false, false),
1615         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1616                     true, false, false),
1617         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1618                     true, false, false),
1619         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1620                     true, false, false),
1621         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1622                     &vmw_cmd_set_render_target_check, true, false, false),
1623         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1624                     true, false, false),
1625         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1626                     true, false, false),
1627         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1628                     true, false, false),
1629         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1630                     true, false, false),
1631         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1632                     true, false, false),
1633         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1634                     true, false, false),
1635         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1636                     true, false, false),
1637         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1638                     false, false, false),
1639         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
1640                     true, true, false),
1641         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
1642                     true, true, false),
1643         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1644                     true, false, false),
1645         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
1646                     true, true, false),
1647         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1648                     true, false, false),
1649         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1650                     true, false, false),
1651         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1652                     true, false, false),
1653         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1654                     true, false, false),
1655         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1656                     true, false, false),
1657         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1658                     true, false, false),
1659         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1660                     &vmw_cmd_blt_surf_screen_check, false, false, false),
1661         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1662                     false, false, false),
1663         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1664                     false, false, false),
1665         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1666                     false, false, false),
1667         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1668                     false, false, false),
1669         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1670                     false, false, false),
1671         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1672                     false, false, false),
1673         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1674                     false, false, false),
1675         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1676                     false, false, false),
1677         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1678                     false, false, false),
1679         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1680                     false, false, false),
1681         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1682                     false, false, false),
1683         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1684                     false, false, false),
1685         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1686                     false, false, false),
1687         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1688                     false, false, true),
1689         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1690                     false, false, true),
1691         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1692                     false, false, true),
1693         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1694                     false, false, true),
1695         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1696                     false, false, true),
1697         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1698                     false, false, true),
1699         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1700                     false, false, true),
1701         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1702                     false, false, true),
1703         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1704                     true, false, true),
1705         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1706                     false, false, true),
1707         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1708                     true, false, true),
1709         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1710                     &vmw_cmd_update_gb_surface, true, false, true),
1711         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1712                     &vmw_cmd_readback_gb_image, true, false, true),
1713         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1714                     &vmw_cmd_readback_gb_surface, true, false, true),
1715         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1716                     &vmw_cmd_invalidate_gb_image, true, false, true),
1717         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1718                     &vmw_cmd_invalidate_gb_surface, true, false, true),
1719         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1720                     false, false, true),
1721         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1722                     false, false, true),
1723         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1724                     false, false, true),
1725         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1726                     false, false, true),
1727         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1728                     false, false, true),
1729         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1730                     false, false, true),
1731         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1732                     true, false, true),
1733         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1734                     false, false, true),
1735         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1736                     false, false, false),
1737         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1738                     true, false, true),
1739         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
1740                     true, false, true),
1741         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
1742                     true, false, true),
1743         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
1744                     true, false, true),
1745         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
1746                     false, false, true),
1747         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
1748                     false, false, true),
1749         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
1750                     false, false, true),
1751         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
1752                     false, false, true),
1753         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
1754                     false, false, true),
1755         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
1756                     false, false, true),
1757         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
1758                     false, false, true),
1759         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
1760                     false, false, true),
1761         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1762                     false, false, true),
1763         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1764                     false, false, true),
1765         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
1766                     true, false, true)
1767 };
1768
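/**
 * vmw_cmd_check - Dispatch a single command to its verifier
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the command stream. Out: size of this
 * command in bytes.
 *
 * Non-3D commands are handed to vmw_cmd_check_not_3d(). 3D commands are
 * looked up in vmw_cmd_entries[] and rejected if they are privileged,
 * deprecated for guest-backed devices, or not supported by the device.
 */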
1769 static int vmw_cmd_check(struct vmw_private *dev_priv,
1770                          struct vmw_sw_context *sw_context,
1771                          void *buf, uint32_t *size)
1772 {
1773         uint32_t cmd_id;
1774         uint32_t size_remaining = *size;
1775         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
1776         int ret;
1777         const struct vmw_cmd_entry *entry;
1778         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
1779
1780         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1781         /* Handle any non-3D commands */
1782         if (unlikely(cmd_id < SVGA_CMD_MAX))
1783                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
1784
1785
1786         cmd_id = le32_to_cpu(header->id);
1787         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
1788
1789         cmd_id -= SVGA_3D_CMD_BASE;
1790         if (unlikely(*size > size_remaining))
1791                 goto out_invalid;
1792
1793         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
1794                 goto out_invalid;
1795
1796         entry = &vmw_cmd_entries[cmd_id];
1797         if (unlikely(!entry->user_allow && !sw_context->kernel))
1798                 goto out_privileged;
1799
1800         if (unlikely(entry->gb_disable && gb))
1801                 goto out_old;
1802
1803         if (unlikely(entry->gb_enable && !gb))
1804                 goto out_new;
1805
1806         ret = entry->func(dev_priv, sw_context, header);
1807         if (unlikely(ret != 0))
1808                 goto out_invalid;
1809
1810         return 0;
1811 out_invalid:
1812         DRM_ERROR("Invalid SVGA3D command: %d\n",
1813                   cmd_id + SVGA_3D_CMD_BASE);
1814         return -EINVAL;
1815 out_privileged:
1816         DRM_ERROR("Privileged SVGA3D command: %d\n",
1817                   cmd_id + SVGA_3D_CMD_BASE);
1818         return -EPERM;
1819 out_old:
1820         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
1821                   cmd_id + SVGA_3D_CMD_BASE);
1822         return -EINVAL;
1823 out_new:
1824         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
1825                   cmd_id + SVGA_3D_CMD_BASE);
1826         return -EINVAL;
1827 }
1828
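/**
 * vmw_cmd_check_all - Verify an entire command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the command stream, calling vmw_cmd_check() on each command and
 * failing with -EINVAL if the verifier falls out of sync with the stream.
 */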
1829 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
1830                              struct vmw_sw_context *sw_context,
1831                              void *buf,
1832                              uint32_t size)
1833 {
1834         int32_t cur_size = size;
1835         int ret;
1836
1837         sw_context->buf_start = buf;
1838
1839         while (cur_size > 0) {
1840                 size = cur_size;
1841                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
1842                 if (unlikely(ret != 0))
1843                         return ret;
1844                 buf = (void *)((unsigned long) buf + size);
1845                 cur_size -= size;
1846         }
1847
1848         if (unlikely(cur_size != 0)) {
1849                 DRM_ERROR("Command verifier out of sync.\n");
1850                 return -EINVAL;
1851         }
1852
1853         return 0;
1854 }
1855
1856 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
1857 {
1858         sw_context->cur_reloc = 0;
1859 }
1860
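/**
 * vmw_apply_relocations - Patch buffer placements into the command stream
 *
 * @sw_context: The software context being used for this batch.
 *
 * For each recorded relocation, write the validated buffer object's final
 * placement (VRAM offset, GMR id or MOB id) into the location in the
 * command stream noted during command checking.
 */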
1861 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
1862 {
1863         uint32_t i;
1864         struct vmw_relocation *reloc;
1865         struct ttm_validate_buffer *validate;
1866         struct ttm_buffer_object *bo;
1867
1868         for (i = 0; i < sw_context->cur_reloc; ++i) {
1869                 reloc = &sw_context->relocs[i];
1870                 validate = &sw_context->val_bufs[reloc->index].base;
1871                 bo = validate->bo;
1872                 switch (bo->mem.mem_type) {
1873                 case TTM_PL_VRAM:
1874                         reloc->location->offset += bo->offset;
1875                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
1876                         break;
1877                 case VMW_PL_GMR:
1878                         reloc->location->gmrId = bo->mem.start;
1879                         break;
1880                 case VMW_PL_MOB:
1881                         *reloc->mob_loc = bo->mem.start;
1882                         break;
1883                 default:
1884                         BUG();
1885                 }
1886         }
1887         vmw_free_relocations(sw_context);
1888 }
1889
1890 /**
1891  * vmw_resource_list_unreference - Free up a resource list and unreference
1892  * all resources referenced by it.
1893  *
1894  * @list: The resource list.
1895  */
1896 static void vmw_resource_list_unreference(struct list_head *list)
1897 {
1898         struct vmw_resource_val_node *val, *val_next;
1899
1900         /*
1901          * Drop references to resources held during command submission.
1902          */
1903
1904         list_for_each_entry_safe(val, val_next, list, head) {
1905                 list_del_init(&val->head);
1906                 vmw_resource_unreference(&val->res);
1907                 if (unlikely(val->staged_bindings))
1908                         kfree(val->staged_bindings);
1909                 kfree(val);
1910         }
1911 }
1912
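/**
 * vmw_clear_validations - Drop buffer references and hash entries held
 * during command submission
 *
 * @sw_context: The software context being used for this batch.
 */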
1913 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
1914 {
1915         struct vmw_validate_buffer *entry, *next;
1916         struct vmw_resource_val_node *val;
1917
1918         /*
1919          * Drop references to DMA buffers held during command submission.
1920          */
1921         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
1922                                  base.head) {
1923                 list_del(&entry->base.head);
1924                 ttm_bo_unref(&entry->base.bo);
1925                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
1926                 sw_context->cur_val_buf--;
1927         }
1928         BUG_ON(sw_context->cur_val_buf != 0);
1929
1930         list_for_each_entry(val, &sw_context->resource_list, head)
1931                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
1932 }
1933
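/**
 * vmw_validate_single_buffer - Validate the placement of a single buffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 * @validate_as_mob: Whether the buffer must be placed as a MOB.
 */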
1934 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
1935                                       struct ttm_buffer_object *bo,
1936                                       bool validate_as_mob)
1937 {
1938         int ret;
1939
1940
1941         /*
1942          * Don't validate pinned buffers.
1943          */
1944
1945         if (bo == dev_priv->pinned_bo ||
1946             (bo == dev_priv->dummy_query_bo &&
1947              dev_priv->dummy_query_bo_pinned))
1948                 return 0;
1949
1950         if (validate_as_mob)
1951                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
1952
1953         /*
1954          * Put BO in VRAM if there is space, otherwise as a GMR.
1955          * If there is no space in VRAM and GMR ids are all used up,
1956          * start evicting GMRs to make room. If the DMA buffer can't be
1957          * used as a GMR, this will return -ENOMEM.
1958          */
1959
1960         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
1961         if (likely(ret == 0 || ret == -ERESTARTSYS))
1962                 return ret;
1963
1964         /*
1965          * If that failed, try VRAM again, this time evicting
1966          * previous contents.
1967          */
1968
1969         DRM_INFO("Falling through to VRAM.\n");
1970         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
1971         return ret;
1972 }
1973
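/**
 * vmw_validate_buffers - Validate all buffers on the validate list
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 */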
1974 static int vmw_validate_buffers(struct vmw_private *dev_priv,
1975                                 struct vmw_sw_context *sw_context)
1976 {
1977         struct vmw_validate_buffer *entry;
1978         int ret;
1979
1980         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
1981                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
1982                                                  entry->validate_as_mob);
1983                 if (unlikely(ret != 0))
1984                         return ret;
1985         }
1986         return 0;
1987 }
1988
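/**
 * vmw_resize_cmd_bounce - Make sure the command bounce buffer is big enough
 *
 * @sw_context: The software context being used for this batch.
 * @size: Required size in bytes.
 *
 * Grows the bounce buffer by roughly 1.5x, page aligned, until it can hold
 * @size bytes. The previous contents of the buffer are discarded.
 */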
1989 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
1990                                  uint32_t size)
1991 {
1992         if (likely(sw_context->cmd_bounce_size >= size))
1993                 return 0;
1994
1995         if (sw_context->cmd_bounce_size == 0)
1996                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
1997
1998         while (sw_context->cmd_bounce_size < size) {
1999                 sw_context->cmd_bounce_size =
2000                         PAGE_ALIGN(sw_context->cmd_bounce_size +
2001                                    (sw_context->cmd_bounce_size >> 1));
2002         }
2003
2004         if (sw_context->cmd_bounce != NULL)
2005                 vfree(sw_context->cmd_bounce);
2006
2007         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2008
2009         if (sw_context->cmd_bounce == NULL) {
2010                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2011                 sw_context->cmd_bounce_size = 0;
2012                 return -ENOMEM;
2013         }
2014
2015         return 0;
2016 }
2017
2018 /**
2019  * vmw_execbuf_fence_commands - create and submit a command stream fence
2020  *
2021  * Creates a fence object and submits a command stream marker.
2022  * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
2023  * It is then safe to fence buffers with a NULL pointer.
2024  *
2025  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2026  * user-space handle is created for the fence; otherwise no handle is created.
2027  */
2028
2029 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2030                                struct vmw_private *dev_priv,
2031                                struct vmw_fence_obj **p_fence,
2032                                uint32_t *p_handle)
2033 {
2034         uint32_t sequence;
2035         int ret;
2036         bool synced = false;
2037
2038         /* p_handle implies file_priv. */
2039         BUG_ON(p_handle != NULL && file_priv == NULL);
2040
2041         ret = vmw_fifo_send_fence(dev_priv, &sequence);
2042         if (unlikely(ret != 0)) {
2043                 DRM_ERROR("Fence submission error. Syncing.\n");
2044                 synced = true;
2045         }
2046
2047         if (p_handle != NULL)
2048                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2049                                             sequence,
2050                                             DRM_VMW_FENCE_FLAG_EXEC,
2051                                             p_fence, p_handle);
2052         else
2053                 ret = vmw_fence_create(dev_priv->fman, sequence,
2054                                        DRM_VMW_FENCE_FLAG_EXEC,
2055                                        p_fence);
2056
2057         if (unlikely(ret != 0 && !synced)) {
2058                 (void) vmw_fallback_wait(dev_priv, false, false,
2059                                          sequence, false,
2060                                          VMW_FENCE_WAIT_TIMEOUT);
2061                 *p_fence = NULL;
2062         }
2063
2064         return 0;
2065 }
2066
2067 /**
2068  * vmw_execbuf_copy_fence_user - copy fence object information to
2069  * user-space.
2070  *
2071  * @dev_priv: Pointer to a vmw_private struct.
2072  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2073  * @ret: Return value from fence object creation.
2074  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2075  * which the information should be copied.
2076  * @fence: Pointer to the fence object.
2077  * @fence_handle: User-space fence handle.
2078  *
2079  * This function copies fence information to user-space. If copying fails,
2080  * the user-space struct drm_vmw_fence_rep::error member is hopefully
2081  * left untouched, and if user-space has preloaded it with -EFAULT, the
2082  * error will hopefully be detected.
2083  * Also if copying fails, user-space will be unable to signal the fence
2084  * object so we wait for it immediately, and then unreference the
2085  * user-space reference.
2086  */
2087 void
2088 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2089                             struct vmw_fpriv *vmw_fp,
2090                             int ret,
2091                             struct drm_vmw_fence_rep __user *user_fence_rep,
2092                             struct vmw_fence_obj *fence,
2093                             uint32_t fence_handle)
2094 {
2095         struct drm_vmw_fence_rep fence_rep;
2096
2097         if (user_fence_rep == NULL)
2098                 return;
2099
2100         memset(&fence_rep, 0, sizeof(fence_rep));
2101
2102         fence_rep.error = ret;
2103         if (ret == 0) {
2104                 BUG_ON(fence == NULL);
2105
2106                 fence_rep.handle = fence_handle;
2107                 fence_rep.seqno = fence->seqno;
2108                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2109                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2110         }
2111
2112         /*
2113          * copy_to_user errors will be detected by user space not
2114          * seeing fence_rep::error filled in. Typically
2115          * user-space would have pre-set that member to -EFAULT.
2116          */
2117         ret = copy_to_user(user_fence_rep, &fence_rep,
2118                            sizeof(fence_rep));
2119
2120         /*
2121          * User-space lost the fence object. We need to sync
2122          * and unreference the handle.
2123          */
2124         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2125                 ttm_ref_object_base_unref(vmw_fp->tfile,
2126                                           fence_handle, TTM_REF_USAGE);
2127                 DRM_ERROR("Fence copy error. Syncing.\n");
2128                 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
2129                                           false, false,
2130                                           VMW_FENCE_WAIT_TIMEOUT);
2131         }
2132 }
2133
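/**
 * vmw_execbuf_process - Verify and submit a command stream
 *
 * @file_priv: Pointer to the calling file, or NULL for in-kernel submissions.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, used when
 * @kernel_commands is NULL.
 * @kernel_commands: Kernel pointer to an already accessible command stream,
 * or NULL.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle command submission on fifo lag,
 * in microseconds.
 * @user_fence_rep: Optional user-space address of a struct drm_vmw_fence_rep
 * to receive fence information.
 * @out_fence: If non-NULL, the created fence object is returned here and the
 * caller is responsible for unreferencing it.
 *
 * Checks the command stream, reserves and validates all resources and
 * buffers it references, applies relocations, copies the commands to the
 * fifo and finally emits a fence.
 */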
2134 int vmw_execbuf_process(struct drm_file *file_priv,
2135                         struct vmw_private *dev_priv,
2136                         void __user *user_commands,
2137                         void *kernel_commands,
2138                         uint32_t command_size,
2139                         uint64_t throttle_us,
2140                         struct drm_vmw_fence_rep __user *user_fence_rep,
2141                         struct vmw_fence_obj **out_fence)
2142 {
2143         struct vmw_sw_context *sw_context = &dev_priv->ctx;
2144         struct vmw_fence_obj *fence = NULL;
2145         struct vmw_resource *error_resource;
2146         struct list_head resource_list;
2147         struct ww_acquire_ctx ticket;
2148         uint32_t handle;
2149         void *cmd;
2150         int ret;
2151
2152         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2153         if (unlikely(ret != 0))
2154                 return -ERESTARTSYS;
2155
2156         if (kernel_commands == NULL) {
2157                 sw_context->kernel = false;
2158
2159                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2160                 if (unlikely(ret != 0))
2161                         goto out_unlock;
2162
2163
2164                 ret = copy_from_user(sw_context->cmd_bounce,
2165                                      user_commands, command_size);
2166
2167                 if (unlikely(ret != 0)) {
2168                         ret = -EFAULT;
2169                         DRM_ERROR("Failed copying commands.\n");
2170                         goto out_unlock;
2171                 }
2172                 kernel_commands = sw_context->cmd_bounce;
2173         } else
2174                 sw_context->kernel = true;
2175
2176         sw_context->tfile = vmw_fpriv(file_priv)->tfile;
2177         sw_context->cur_reloc = 0;
2178         sw_context->cur_val_buf = 0;
2179         sw_context->fence_flags = 0;
2180         INIT_LIST_HEAD(&sw_context->resource_list);
2181         sw_context->cur_query_bo = dev_priv->pinned_bo;
2182         sw_context->last_query_ctx = NULL;
2183         sw_context->needs_post_query_barrier = false;
2184         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2185         INIT_LIST_HEAD(&sw_context->validate_nodes);
2186         INIT_LIST_HEAD(&sw_context->res_relocations);
2187         if (!sw_context->res_ht_initialized) {
2188                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2189                 if (unlikely(ret != 0))
2190                         goto out_unlock;
2191                 sw_context->res_ht_initialized = true;
2192         }
2193
2194         INIT_LIST_HEAD(&resource_list);
2195         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2196                                 command_size);
2197         if (unlikely(ret != 0))
2198                 goto out_err;
2199
2200         ret = vmw_resources_reserve(sw_context);
2201         if (unlikely(ret != 0))
2202                 goto out_err;
2203
2204         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2205         if (unlikely(ret != 0))
2206                 goto out_err;
2207
2208         ret = vmw_validate_buffers(dev_priv, sw_context);
2209         if (unlikely(ret != 0))
2210                 goto out_err;
2211
2212         ret = vmw_resources_validate(sw_context);
2213         if (unlikely(ret != 0))
2214                 goto out_err;
2215
2216         if (throttle_us) {
2217                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2218                                    throttle_us);
2219
2220                 if (unlikely(ret != 0))
2221                         goto out_err;
2222         }
2223
2224         ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2225         if (unlikely(ret != 0)) {
2226                 ret = -ERESTARTSYS;
2227                 goto out_err;
2228         }
2229
2230         cmd = vmw_fifo_reserve(dev_priv, command_size);
2231         if (unlikely(cmd == NULL)) {
2232                 DRM_ERROR("Failed reserving fifo space for commands.\n");
2233                 ret = -ENOMEM;
2234                 goto out_unlock_binding;
2235         }
2236
2237         vmw_apply_relocations(sw_context);
2238         memcpy(cmd, kernel_commands, command_size);
2239
2240         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2241         vmw_resource_relocations_free(&sw_context->res_relocations);
2242
2243         vmw_fifo_commit(dev_priv, command_size);
2244
2245         vmw_query_bo_switch_commit(dev_priv, sw_context);
2246         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2247                                          &fence,
2248                                          (user_fence_rep) ? &handle : NULL);
2249         /*
2250          * This error is harmless, because if fence submission fails,
2251          * vmw_fifo_send_fence will sync. The error will be propagated to
2252          * user-space in @fence_rep
2253          */
2254
2255         if (ret != 0)
2256                 DRM_ERROR("Fence submission error. Syncing.\n");
2257
2258         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2259         mutex_unlock(&dev_priv->binding_mutex);
2260
2261         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2262                                     (void *) fence);
2263
2264         if (unlikely(dev_priv->pinned_bo != NULL &&
2265                      !dev_priv->query_cid_valid))
2266                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2267
2268         vmw_clear_validations(sw_context);
2269         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2270                                     user_fence_rep, fence, handle);
2271
2272         /* Don't unreference when handing fence out */
2273         if (unlikely(out_fence != NULL)) {
2274                 *out_fence = fence;
2275                 fence = NULL;
2276         } else if (likely(fence != NULL)) {
2277                 vmw_fence_obj_unreference(&fence);
2278         }
2279
2280         list_splice_init(&sw_context->resource_list, &resource_list);
2281         mutex_unlock(&dev_priv->cmdbuf_mutex);
2282
2283         /*
2284          * Unreference resources outside of the cmdbuf_mutex to
2285          * avoid deadlocks in resource destruction paths.
2286          */
2287         vmw_resource_list_unreference(&resource_list);
2288
2289         return 0;
2290
2291 out_unlock_binding:
2292         mutex_unlock(&dev_priv->binding_mutex);
2293 out_err:
2294         vmw_resource_relocations_free(&sw_context->res_relocations);
2295         vmw_free_relocations(sw_context);
2296         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2297         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2298         vmw_clear_validations(sw_context);
2299         if (unlikely(dev_priv->pinned_bo != NULL &&
2300                      !dev_priv->query_cid_valid))
2301                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2302 out_unlock:
2303         list_splice_init(&sw_context->resource_list, &resource_list);
2304         error_resource = sw_context->error_resource;
2305         sw_context->error_resource = NULL;
2306         mutex_unlock(&dev_priv->cmdbuf_mutex);
2307
2308         /*
2309          * Unreference resources outside of the cmdbuf_mutex to
2310          * avoid deadlocks in resource destruction paths.
2311          */
2312         vmw_resource_list_unreference(&resource_list);
2313         if (unlikely(error_resource != NULL))
2314                 vmw_resource_unreference(&error_resource);
2315
2316         return ret;
2317 }
2318
2319 /**
2320  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2321  *
2322  * @dev_priv: The device private structure.
2323  *
2324  * This function is called to idle the fifo and unpin the query buffer
2325  * if the normal way to do this hits an error, which should typically be
2326  * extremely rare.
2327  */
2328 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2329 {
2330         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2331
2332         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2333         vmw_bo_pin(dev_priv->pinned_bo, false);
2334         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2335         dev_priv->dummy_query_bo_pinned = false;
2336 }
2337
2338
2339 /**
2340  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2341  * query bo.
2342  *
2343  * @dev_priv: The device private structure.
2344  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2345  * _after_ a query barrier that flushes all queries touching the current
2346  * buffer pointed to by @dev_priv->pinned_bo
2347  *
2348  * This function should be used to unpin the pinned query bo, or
2349  * as a query barrier when we need to make sure that all queries have
2350  * finished before the next fifo command. (For example on hardware
2351  * context destructions where the hardware may otherwise leak unfinished
2352  * queries).
2353  *
2354  * This function does not return any failure codes, but makes attempts
2355  * to do safe unpinning in case of errors.
2356  *
2357  * The function will synchronize on the previous query barrier, and will
2358  * thus not finish until that barrier has executed.
2359  *
2360  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2361  * before calling this function.
2362  */
2363 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2364                                      struct vmw_fence_obj *fence)
2365 {
2366         int ret = 0;
2367         struct list_head validate_list;
2368         struct ttm_validate_buffer pinned_val, query_val;
2369         struct vmw_fence_obj *lfence = NULL;
2370         struct ww_acquire_ctx ticket;
2371
2372         if (dev_priv->pinned_bo == NULL)
2373                 goto out_unlock;
2374
2375         INIT_LIST_HEAD(&validate_list);
2376
2377         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2378         list_add_tail(&pinned_val.head, &validate_list);
2379
2380         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2381         list_add_tail(&query_val.head, &validate_list);
2382
2383         do {
2384                 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2385         } while (ret == -ERESTARTSYS);
2386
2387         if (unlikely(ret != 0)) {
2388                 vmw_execbuf_unpin_panic(dev_priv);
2389                 goto out_no_reserve;
2390         }
2391
2392         if (dev_priv->query_cid_valid) {
2393                 BUG_ON(fence != NULL);
2394                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2395                 if (unlikely(ret != 0)) {
2396                         vmw_execbuf_unpin_panic(dev_priv);
2397                         goto out_no_emit;
2398                 }
2399                 dev_priv->query_cid_valid = false;
2400         }
2401
2402         vmw_bo_pin(dev_priv->pinned_bo, false);
2403         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2404         dev_priv->dummy_query_bo_pinned = false;
2405
2406         if (fence == NULL) {
2407                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2408                                                   NULL);
2409                 fence = lfence;
2410         }
2411         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2412         if (lfence != NULL)
2413                 vmw_fence_obj_unreference(&lfence);
2414
2415         ttm_bo_unref(&query_val.bo);
2416         ttm_bo_unref(&pinned_val.bo);
2417         ttm_bo_unref(&dev_priv->pinned_bo);
2418
2419 out_unlock:
2420         return;
2421
2422 out_no_emit:
2423         ttm_eu_backoff_reservation(&ticket, &validate_list);
2424 out_no_reserve:
2425         ttm_bo_unref(&query_val.bo);
2426         ttm_bo_unref(&pinned_val.bo);
2427         ttm_bo_unref(&dev_priv->pinned_bo);
2428 }
2429
2430 /**
2431  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2432  * query bo.
2433  *
2434  * @dev_priv: The device private structure.
2435  *
2436  * This function should be used to unpin the pinned query bo, or
2437  * as a query barrier when we need to make sure that all queries have
2438  * finished before the next fifo command. (For example on hardware
2439  * context destructions where the hardware may otherwise leak unfinished
2440  * queries).
2441  *
2442  * This function does not return any failure codes, but makes attempts
2443  * to do safe unpinning in case of errors.
2444  *
2445  * The function will synchronize on the previous query barrier, and will
2446  * thus not finish until that barrier has executed.
2447  */
2448 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2449 {
2450         mutex_lock(&dev_priv->cmdbuf_mutex);
2451         if (dev_priv->query_cid_valid)
2452                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2453         mutex_unlock(&dev_priv->cmdbuf_mutex);
2454 }
2455
2456
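/**
 * vmw_execbuf_ioctl - Entry point for the DRM_VMW_EXECBUF ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the calling file.
 *
 * Checks the argument version, takes the master ttm read lock and hands
 * the user-space command stream to vmw_execbuf_process(). Cursor state is
 * updated after a successful submission.
 */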
2457 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2458                       struct drm_file *file_priv)
2459 {
2460         struct vmw_private *dev_priv = vmw_priv(dev);
2461         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2462         struct vmw_master *vmaster = vmw_master(file_priv->master);
2463         int ret;
2464
2465         /*
2466          * This will allow us to extend the ioctl argument while
2467          * maintaining backwards compatibility:
2468          * We take different code paths depending on the value of
2469          * arg->version.
2470          */
2471
2472         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2473                 DRM_ERROR("Incorrect execbuf version.\n");
2474                 DRM_ERROR("You're running outdated experimental "
2475                           "vmwgfx user-space drivers.\n");
2476                 return -EINVAL;
2477         }
2478
2479         ret = ttm_read_lock(&vmaster->lock, true);
2480         if (unlikely(ret != 0))
2481                 return ret;
2482
2483         ret = vmw_execbuf_process(file_priv, dev_priv,
2484                                   (void __user *)(unsigned long)arg->commands,
2485                                   NULL, arg->command_size, arg->throttle_us,
2486                                   (void __user *)(unsigned long)arg->fence_rep,
2487                                   NULL);
2488
2489         if (unlikely(ret != 0))
2490                 goto out_unlock;
2491
2492         vmw_kms_cursor_post_execbuf(dev_priv);
2493
2494 out_unlock:
2495         ttm_read_unlock(&vmaster->lock);
2496         return ret;
2497 }