/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

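/*
 * Order (log2 of the bucket count) of the hash table used to map
 * resources and buffer objects to their validation entries.
 */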
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset into the command buffer, in units of 4-byte words, of
 * the id that needs fixup.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        unsigned long new_backup_offset;
        bool first_usage;
        bool no_buffer_needed;
};

/**
 * vmw_resource_list_unreserve - Unreserve resources previously reserved for
 * command submission.
 *
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
                                        bool backoff)
{
        struct vmw_resource_val_node *val;

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *new_backup =
                        backoff ? NULL : val->new_backup;

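                /*
                 * On backoff (failed submission), a NULL new backup is
                 * passed so the resource keeps its current backup buffer.
                 */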
                vmw_resource_unreserve(res, new_backup,
                        val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: If non-NULL on entry, points on successful return to a valid
 * pointer to a struct vmw_resource_val_node.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

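        /*
         * The hash table is keyed on the resource's kernel pointer value,
         * so repeated references to the same resource hit the fast path.
         */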
        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        list_add_tail(&node->head, &sw_context->resource_list);
        node->res = vmw_resource_reference(res);
        node->first_usage = true;

        if (unlikely(p_node != NULL))
                *p_node = node;

        return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

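        /*
         * rel->offset counts 32-bit words, so indexing the uint32_t
         * command buffer directly patches the resource id in place.
         */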
        list_for_each_entry(rel, list, head)
                cb[rel->offset] = rel->res->id;
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
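        /*
         * Uses the GNU "?:" extension: returns the nonzero result of
         * capable() for CAP_SYS_ADMIN callers and -EINVAL otherwise, so
         * callers that test for a zero return treat both as failure.
         */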
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
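                /* Pointer difference from the array base yields the index. */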
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) bo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct ttm_buffer_object *bo = &res->backup->base;

                        ret = vmw_bo_to_validate_list
                                (sw_context, bo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }
        return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible, type-specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed where the user-space resource id handle is located.
 * @p_val: If non-NULL, points on successful return to the validation
 * node for the resource.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             enum vmw_res_type res_type,
                             const struct vmw_user_resource_conv *converter,
                             uint32_t *id,
                             struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id == SVGA3D_INVALID_ID)
                return 0;

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->tfile,
                                              *id,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id;

        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id - sw_context->buf_start);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;
        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, NULL);
        return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
                return -EPERM;
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        if (unlikely(!sw_context->kernel)) {
                DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
                return -EPERM;
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

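                /*
                 * Sanity-limit query buffers to a few pages; presumably no
                 * legitimate query result needs more than four.
                 */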
                if (unlikely(new_query_bo->num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin(dev_priv->pinned_bo, false);
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin(sw_context->cur_query_bo, true);

                        /*
                         * We also pin the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        vmw_bo_pin(dev_priv->dummy_query_bo, true);
                        dev_priv->dummy_query_bo_pinned = true;

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                ttm_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
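        /*
         * Only ->mob_loc is recorded for a MOB fixup; ->location stays
         * NULL, which presumably is how vmw_apply_relocations() later
         * tells a MOB relocation apart from a guest-pointer one.
         */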
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number of relocations per submission "
                          "exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_begin_gb_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginGBQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

        if (unlikely(dev_priv->has_mob)) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdBeginGBQuery q;
                } gb_cmd;

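                /*
                 * Rewrite the legacy command in place as its guest-backed
                 * equivalent; the BUG_ON guards the assumption that both
                 * command layouts have the same size.
                 */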
                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdWaitForGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
        if (unlikely(ret != 0)) {
                if (unlikely(ret != -ERESTARTSYS))
                        DRM_ERROR("Could not find surface for DMA.\n");
                goto out_no_surface;
        }

        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

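        /* Let the KMS code snoop the contents of cursor-surface DMA. */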
        vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
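        /*
         * Bound numVertexDecls by what actually fits in the command body,
         * so the loop below cannot walk past the submitted data.
         */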
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}


static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        };

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
          ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &cur_state->value, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd = buf;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 enum vmw_res_type res_type,
                                 const struct vmw_user_resource_conv
                                 *converter,
                                 uint32_t *res_id,
                                 uint32_t *buf_id,
                                 unsigned long backup_offset)
{
        int ret;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_resource_val_node *val_node;

        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
                                converter, res_id, &val_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
        if (unlikely(ret != 0))
                return ret;

        if (val_node->first_usage)
                val_node->no_buffer_needed = true;

        vmw_dmabuf_unreference(&val_node->new_backup);
        val_node->new_backup = dma_buf;
        val_node->new_backup_offset = backup_offset;

        return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_bind_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
                                     user_surface_converter,
                                     &cmd->body.sid, &cmd->body.mobid,
                                     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBImage body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBImage body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context,
                                       SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context,
                                       SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBImage body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.sid, NULL);
}

1404 /**
1405  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1406  * command
1407  *
1408  * @dev_priv: Pointer to a device private struct.
1409  * @sw_context: The software context being used for this batch.
1410  * @header: Pointer to the command header in the command stream.
1411  */
1412 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1413                               struct vmw_sw_context *sw_context,
1414                               SVGA3dCmdHeader *header)
1415 {
1416         /*
1417          * The SVGA3dCmdSetShader body needs no fixup or resource
1418          * lookup here; only the context id is validated.
1419          */
1420         return vmw_cmd_cid_check(dev_priv, sw_context, header);
1421 }
1431
1432 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1433                                 struct vmw_sw_context *sw_context,
1434                                 void *buf, uint32_t *size)
1435 {
1436         uint32_t size_remaining = *size;
1437         uint32_t cmd_id;
1438
1439         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1440         switch (cmd_id) {
1441         case SVGA_CMD_UPDATE:
1442                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1443                 break;
1444         case SVGA_CMD_DEFINE_GMRFB:
1445                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1446                 break;
1447         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1448                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1449                 break;
1450         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1451                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1452                 break;
1453         default:
1454                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1455                 return -EINVAL;
1456         }
1457
1458         if (*size > size_remaining) {
1459                 DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
1460                           cmd_id);
1461                 return -EINVAL;
1462         }
1463
1464         if (unlikely(!sw_context->kernel)) {
1465                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1466                 return -EPERM;
1467         }
1468
1469         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1470                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1471
1472         return 0;
1473 }
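
/*
 * Illustrative sketch (not driver code): each 2D command that
 * vmw_cmd_check_not_3d() accepts is laid out in the fifo stream as a
 * 32-bit command id immediately followed by its payload struct, e.g.
 *
 *	struct {
 *		uint32_t cmd_id;		e.g. SVGA_CMD_UPDATE
 *		SVGAFifoCmdUpdate body;		the command payload
 *	} update_cmd;
 *
 * which is why every case above computes
 *
 *	*size = sizeof(uint32_t) + sizeof(payload struct);
 *
 * before checking *size against the bytes remaining in the buffer.
 */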
1474
1475 typedef int (*vmw_cmd_func) (struct vmw_private *,
1476                              struct vmw_sw_context *,
1477                              SVGA3dCmdHeader *);
1478
1479 #define VMW_CMD_DEF(cmd, func) \
1480         [cmd - SVGA_3D_CMD_BASE] = func
1481
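/*
 * Illustrative expansion (not driver code): VMW_CMD_DEF relies on C99
 * designated array initializers, so
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader)
 *
 * expands to
 *
 *	[SVGA_3D_CMD_SET_SHADER - SVGA_3D_CMD_BASE] = &vmw_cmd_set_shader,
 *
 * placing each validator at the slot that vmw_cmd_check() later
 * indexes with (cmd_id - SVGA_3D_CMD_BASE).
 */
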
1482 static const vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
1483         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
1484         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
1485         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
1486         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
1487         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
1488         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
1489         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
1490         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
1491         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
1492         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
1493         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1494                     &vmw_cmd_set_render_target_check),
1495         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
1496         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
1497         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
1498         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
1499         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
1500         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
1501         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
1502         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
1503         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
1504         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
1505         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
1506         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
1507         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
1508         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
1509         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
1510         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
1511         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
1512         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
1513         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1514                     &vmw_cmd_blt_surf_screen_check),
1515         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
1516         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
1517         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
1518         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
1519         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid),
1520         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid),
1521         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface),
1522         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid),
1523         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image),
1524         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1525                     &vmw_cmd_update_gb_surface),
1526         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1527                     &vmw_cmd_readback_gb_image),
1528         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1529                     &vmw_cmd_readback_gb_surface),
1530         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1531                     &vmw_cmd_invalidate_gb_image),
1532         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1533                     &vmw_cmd_invalidate_gb_surface),
1534         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid),
1535         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid),
1536         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid),
1537         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid),
1538         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid),
1539         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query),
1540         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query),
1541         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query),
1542 };
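
/*
 * Note: designated initializers leave any command id in range that
 * lacks an explicit VMW_CMD_DEF() entry above as a NULL pointer,
 * which is why vmw_cmd_check() below refuses to dispatch through a
 * NULL slot.
 */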
1543
1544 static int vmw_cmd_check(struct vmw_private *dev_priv,
1545                          struct vmw_sw_context *sw_context,
1546                          void *buf, uint32_t *size)
1547 {
1548         uint32_t cmd_id;
1549         uint32_t size_remaining = *size;
1550         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
1551         int ret;
1552
1553         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1554         /* Handle any non-3D commands. */
1555         if (unlikely(cmd_id < SVGA_CMD_MAX))
1556                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
1557
1558
1559         cmd_id = le32_to_cpu(header->id);
1560         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
1561
1562         cmd_id -= SVGA_3D_CMD_BASE;
1563         if (unlikely(*size > size_remaining))
1564                 goto out_err;
1565
1566         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE ||
1567                      vmw_cmd_funcs[cmd_id] == NULL))
1568                 goto out_err;
1569         ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
1570         if (unlikely(ret != 0))
1571                 goto out_err;
1572
1573         return 0;
1574 out_err:
1575         DRM_ERROR("Illegal / Invalid SVGA3D command: %u\n",
1576                   cmd_id + SVGA_3D_CMD_BASE);
1577         return -EINVAL;
1578 }
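
/*
 * Worked example (not driver code): a 3D command is an SVGA3dCmdHeader
 * (id + size) followed by header.size bytes of body, so the footprint
 * charged against the buffer above is
 *
 *	*size = sizeof(SVGA3dCmdHeader) + header.size
 *
 * e.g. a SET_SHADER command occupies
 *
 *	sizeof(SVGA3dCmdHeader) + sizeof(SVGA3dCmdSetShader)
 *
 * bytes, and vmw_cmd_check_all() below advances its scan pointer by
 * exactly *size before validating the next command.
 */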
1579
1580 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
1581                              struct vmw_sw_context *sw_context,
1582                              void *buf,
1583                              uint32_t size)
1584 {
1585         int32_t cur_size = size;
1586         int ret;
1587
1588         sw_context->buf_start = buf;
1589
1590         while (cur_size > 0) {
1591                 size = cur_size;
1592                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
1593                 if (unlikely(ret != 0))
1594                         return ret;
1595                 buf = (void *)((unsigned long) buf + size);
1596                 cur_size -= size;
1597         }
1598
1599         if (unlikely(cur_size != 0)) {
1600                 DRM_ERROR("Command verifier out of sync.\n");
1601                 return -EINVAL;
1602         }
1603
1604         return 0;
1605 }
1606
1607 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
1608 {
1609         sw_context->cur_reloc = 0;
1610 }
1611
1612 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
1613 {
1614         uint32_t i;
1615         struct vmw_relocation *reloc;
1616         struct ttm_validate_buffer *validate;
1617         struct ttm_buffer_object *bo;
1618
1619         for (i = 0; i < sw_context->cur_reloc; ++i) {
1620                 reloc = &sw_context->relocs[i];
1621                 validate = &sw_context->val_bufs[reloc->index].base;
1622                 bo = validate->bo;
1623                 switch (bo->mem.mem_type) {
1624                 case TTM_PL_VRAM:
1625                         reloc->location->offset += bo->offset;
1626                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
1627                         break;
1628                 case VMW_PL_GMR:
1629                         reloc->location->gmrId = bo->mem.start;
1630                         break;
1631                 case VMW_PL_MOB:
1632                         *reloc->mob_loc = bo->mem.start;
1633                         break;
1634                 default:
1635                         BUG();
1636                 }
1637         }
1638         vmw_free_relocations(sw_context);
1639 }
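
/*
 * Illustrative sketch (not driver code): a relocation records where a
 * guest address lives inside the command stream. For a buffer that was
 * validated into a GMR, the patch above amounts to
 *
 *	reloc->location->gmrId = bo->mem.start;
 *
 * i.e. the GMR id of the placed buffer, while a VRAM placement is
 * expressed as the special id SVGA_GMR_FRAMEBUFFER with the buffer's
 * VRAM offset added to the original offset, and a MOB placement
 * patches only the 32-bit MOB id at *reloc->mob_loc.
 */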
1640
1641 /**
1642  * vmw_resource_list_unreference - Free up a resource list and unreference
1643  * all resources referenced by it.
1644  *
1645  * @list: The resource list.
1646  */
1647 static void vmw_resource_list_unreference(struct list_head *list)
1648 {
1649         struct vmw_resource_val_node *val, *val_next;
1650
1651         /*
1652          * Drop references to resources held during command submission.
1653          */
1654
1655         list_for_each_entry_safe(val, val_next, list, head) {
1656                 list_del_init(&val->head);
1657                 vmw_resource_unreference(&val->res);
1658                 kfree(val);
1659         }
1660 }
1661
1662 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
1663 {
1664         struct vmw_validate_buffer *entry, *next;
1665         struct vmw_resource_val_node *val;
1666
1667         /*
1668          * Drop references to DMA buffers held during command submission.
1669          */
1670         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
1671                                  base.head) {
1672                 list_del(&entry->base.head);
1673                 ttm_bo_unref(&entry->base.bo);
1674                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
1675                 sw_context->cur_val_buf--;
1676         }
1677         BUG_ON(sw_context->cur_val_buf != 0);
1678
1679         list_for_each_entry(val, &sw_context->resource_list, head)
1680                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
1681 }
1682
1683 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
1684                                       struct ttm_buffer_object *bo,
1685                                       bool validate_as_mob)
1686 {
1687         int ret;
1688
1689
1690         /*
1691          * Don't validate pinned buffers.
1692          */
1693
1694         if (bo == dev_priv->pinned_bo ||
1695             (bo == dev_priv->dummy_query_bo &&
1696              dev_priv->dummy_query_bo_pinned))
1697                 return 0;
1698
1699         if (validate_as_mob)
1700                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
1701
1702         /*
1703          * Put BO in VRAM if there is space, otherwise as a GMR.
1704          * If there is no space in VRAM and GMR ids are all used up,
1705          * start evicting GMRs to make room. If the DMA buffer can't be
1706          * used as a GMR, this will return -ENOMEM.
1707          */
1708
1709         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
1710         if (likely(ret == 0 || ret == -ERESTARTSYS))
1711                 return ret;
1712
1713         /*
1714          * If that failed, try VRAM again, this time evicting
1715          * previous contents.
1716          */
1717
1718         DRM_INFO("Falling through to VRAM.\n");
1719         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
1720         return ret;
1721 }
1722
1723 static int vmw_validate_buffers(struct vmw_private *dev_priv,
1724                                 struct vmw_sw_context *sw_context)
1725 {
1726         struct vmw_validate_buffer *entry;
1727         int ret;
1728
1729         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
1730                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
1731                                                  entry->validate_as_mob);
1732                 if (unlikely(ret != 0))
1733                         return ret;
1734         }
1735         return 0;
1736 }
1737
1738 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
1739                                  uint32_t size)
1740 {
1741         if (likely(sw_context->cmd_bounce_size >= size))
1742                 return 0;
1743
1744         if (sw_context->cmd_bounce_size == 0)
1745                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
1746
1747         while (sw_context->cmd_bounce_size < size) {
1748                 sw_context->cmd_bounce_size =
1749                         PAGE_ALIGN(sw_context->cmd_bounce_size +
1750                                    (sw_context->cmd_bounce_size >> 1));
1751         }
1752
1753         if (sw_context->cmd_bounce != NULL)
1754                 vfree(sw_context->cmd_bounce);
1755
1756         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
1757
1758         if (sw_context->cmd_bounce == NULL) {
1759                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
1760                 sw_context->cmd_bounce_size = 0;
1761                 return -ENOMEM;
1762         }
1763
1764         return 0;
1765 }
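
/*
 * Worked example (assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32768 and
 * PAGE_SIZE is 4096): a 100000-byte submission grows the bounce buffer
 * 32768 -> 49152 -> 73728 -> 110592, each step being
 * PAGE_ALIGN(size + size / 2), i.e. roughly 1.5x growth. The geometric
 * growth keeps the number of vmalloc/vfree cycles logarithmic in the
 * largest submission size rather than one per submission.
 */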
1766
1767 /**
1768  * vmw_execbuf_fence_commands - create and submit a command stream fence
1769  *
1770  * Creates a fence object and submits a command stream marker.
1771  * If this fails for some reason, we sync the fifo and hand back a NULL
1772  * fence pointer; it is then safe to fence buffers with a NULL pointer.
1773  *
1774  * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
1775  * user-space fence handle is created; otherwise no handle is created.
1776  */
1777
1778 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
1779                                struct vmw_private *dev_priv,
1780                                struct vmw_fence_obj **p_fence,
1781                                uint32_t *p_handle)
1782 {
1783         uint32_t sequence;
1784         int ret;
1785         bool synced = false;
1786
1787         /* p_handle implies file_priv. */
1788         BUG_ON(p_handle != NULL && file_priv == NULL);
1789
1790         ret = vmw_fifo_send_fence(dev_priv, &sequence);
1791         if (unlikely(ret != 0)) {
1792                 DRM_ERROR("Fence submission error. Syncing.\n");
1793                 synced = true;
1794         }
1795
1796         if (p_handle != NULL)
1797                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
1798                                             sequence,
1799                                             DRM_VMW_FENCE_FLAG_EXEC,
1800                                             p_fence, p_handle);
1801         else
1802                 ret = vmw_fence_create(dev_priv->fman, sequence,
1803                                        DRM_VMW_FENCE_FLAG_EXEC,
1804                                        p_fence);
1805
1806         if (unlikely(ret != 0 && !synced)) {
1807                 (void) vmw_fallback_wait(dev_priv, false, false,
1808                                          sequence, false,
1809                                          VMW_FENCE_WAIT_TIMEOUT);
1810                 *p_fence = NULL;
1811         }
1812
1813         return 0;
1814 }
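
/*
 * Illustrative usage (not driver code): the execbuf path requests a
 * user-space handle only when a fence_rep address was supplied, while
 * purely kernel-internal callers pass a NULL handle pointer:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *
 * In both cases a NULL fence after return means the fifo was synced as
 * a fallback, and fencing buffers with a NULL fence is safe.
 */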
1815
1816 /**
1817  * vmw_execbuf_copy_fence_user - copy fence object information to
1818  * user-space.
1819  *
1820  * @dev_priv: Pointer to a vmw_private struct.
1821  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
1822  * @ret: Return value from fence object creation.
1823  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
1824  * which the information should be copied.
1825  * @fence: Pointer to the fence object.
1826  * @fence_handle: User-space fence handle.
1827  *
1828  * This function copies fence information to user-space. If copying fails,
1829  * the user-space struct drm_vmw_fence_rep::error member is normally
1830  * left untouched, so if user-space has preloaded it with -EFAULT,
1831  * the failure can be detected there.
1832  * Also, if copying fails, user-space will be unable to signal the
1833  * fence object, so we wait for it immediately and then unreference
1834  * the user-space reference.
1835  */
1836 void
1837 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
1838                             struct vmw_fpriv *vmw_fp,
1839                             int ret,
1840                             struct drm_vmw_fence_rep __user *user_fence_rep,
1841                             struct vmw_fence_obj *fence,
1842                             uint32_t fence_handle)
1843 {
1844         struct drm_vmw_fence_rep fence_rep;
1845
1846         if (user_fence_rep == NULL)
1847                 return;
1848
1849         memset(&fence_rep, 0, sizeof(fence_rep));
1850
1851         fence_rep.error = ret;
1852         if (ret == 0) {
1853                 BUG_ON(fence == NULL);
1854
1855                 fence_rep.handle = fence_handle;
1856                 fence_rep.seqno = fence->seqno;
1857                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
1858                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
1859         }
1860
1861         /*
1862          * copy_to_user errors will be detected by user space not
1863          * seeing fence_rep::error filled in. Typically
1864          * user-space would have pre-set that member to -EFAULT.
1865          */
1866         ret = copy_to_user(user_fence_rep, &fence_rep,
1867                            sizeof(fence_rep));
1868
1869         /*
1870          * User-space lost the fence object. We need to sync
1871          * and unreference the handle.
1872          */
1873         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
1874                 ttm_ref_object_base_unref(vmw_fp->tfile,
1875                                           fence_handle, TTM_REF_USAGE);
1876                 DRM_ERROR("Fence copy error. Syncing.\n");
1877                 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
1878                                           false, false,
1879                                           VMW_FENCE_WAIT_TIMEOUT);
1880         }
1881 }
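
/*
 * Illustrative user-space counterpart (a sketch under the convention
 * described above, not part of any shipped library): callers pre-seed
 * the error member so that a failed copy_to_user() is observable:
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;
 *	arg.fence_rep = (unsigned long) &rep;
 *	... submit via the execbuf ioctl ...
 *	if (rep.error != 0)
 *		handle_missing_fence();   (hypothetical helper)
 */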
1882
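/*
 * vmw_execbuf_process - Validate and submit a command stream.
 * (Overview inferred from the function body; the original carries no
 * kerneldoc.)
 *
 * Stages: copy user commands into the bounce buffer unless a trusted
 * kernel stream was passed, verify all commands and collect
 * relocations (vmw_cmd_check_all), reserve and validate resources and
 * buffers, optionally throttle, then reserve fifo space, apply
 * relocations, commit, fence the submission, and tear down state.
 * Resources are unreferenced outside cmdbuf_mutex to avoid deadlocks
 * in destruction paths.
 */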
1883 int vmw_execbuf_process(struct drm_file *file_priv,
1884                         struct vmw_private *dev_priv,
1885                         void __user *user_commands,
1886                         void *kernel_commands,
1887                         uint32_t command_size,
1888                         uint64_t throttle_us,
1889                         struct drm_vmw_fence_rep __user *user_fence_rep,
1890                         struct vmw_fence_obj **out_fence)
1891 {
1892         struct vmw_sw_context *sw_context = &dev_priv->ctx;
1893         struct vmw_fence_obj *fence = NULL;
1894         struct vmw_resource *error_resource;
1895         struct list_head resource_list;
1896         struct ww_acquire_ctx ticket;
1897         uint32_t handle;
1898         void *cmd;
1899         int ret;
1900
1901         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
1902         if (unlikely(ret != 0))
1903                 return -ERESTARTSYS;
1904
1905         if (kernel_commands == NULL) {
1906                 sw_context->kernel = false;
1907
1908                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
1909                 if (unlikely(ret != 0))
1910                         goto out_unlock;
1911
1912
1913                 ret = copy_from_user(sw_context->cmd_bounce,
1914                                      user_commands, command_size);
1915
1916                 if (unlikely(ret != 0)) {
1917                         ret = -EFAULT;
1918                         DRM_ERROR("Failed copying commands.\n");
1919                         goto out_unlock;
1920                 }
1921                 kernel_commands = sw_context->cmd_bounce;
1922         } else
1923                 sw_context->kernel = true;
1924
1925         sw_context->tfile = vmw_fpriv(file_priv)->tfile;
1926         sw_context->cur_reloc = 0;
1927         sw_context->cur_val_buf = 0;
1928         sw_context->fence_flags = 0;
1929         INIT_LIST_HEAD(&sw_context->resource_list);
1930         sw_context->cur_query_bo = dev_priv->pinned_bo;
1931         sw_context->last_query_ctx = NULL;
1932         sw_context->needs_post_query_barrier = false;
1933         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
1934         INIT_LIST_HEAD(&sw_context->validate_nodes);
1935         INIT_LIST_HEAD(&sw_context->res_relocations);
1936         if (!sw_context->res_ht_initialized) {
1937                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
1938                 if (unlikely(ret != 0))
1939                         goto out_unlock;
1940                 sw_context->res_ht_initialized = true;
1941         }
1942
1943         INIT_LIST_HEAD(&resource_list);
1944         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1945                                 command_size);
1946         if (unlikely(ret != 0))
1947                 goto out_err;
1948
1949         ret = vmw_resources_reserve(sw_context);
1950         if (unlikely(ret != 0))
1951                 goto out_err;
1952
1953         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
1954         if (unlikely(ret != 0))
1955                 goto out_err;
1956
1957         ret = vmw_validate_buffers(dev_priv, sw_context);
1958         if (unlikely(ret != 0))
1959                 goto out_err;
1960
1961         ret = vmw_resources_validate(sw_context);
1962         if (unlikely(ret != 0))
1963                 goto out_err;
1964
1965         if (throttle_us) {
1966                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
1967                                    throttle_us);
1968
1969                 if (unlikely(ret != 0))
1970                         goto out_err;
1971         }
1972
1973         cmd = vmw_fifo_reserve(dev_priv, command_size);
1974         if (unlikely(cmd == NULL)) {
1975                 DRM_ERROR("Failed reserving fifo space for commands.\n");
1976                 ret = -ENOMEM;
1977                 goto out_err;
1978         }
1979
1980         vmw_apply_relocations(sw_context);
1981         memcpy(cmd, kernel_commands, command_size);
1982
1983         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
1984         vmw_resource_relocations_free(&sw_context->res_relocations);
1985
1986         vmw_fifo_commit(dev_priv, command_size);
1987
1988         vmw_query_bo_switch_commit(dev_priv, sw_context);
1989         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1990                                          &fence,
1991                                          (user_fence_rep) ? &handle : NULL);
1992         /*
1993          * This error is harmless, because if fence submission fails,
1994          * vmw_fifo_send_fence will sync. The error will be propagated to
1995          * user-space in the fence_rep error member.
1996          */
1997
1998         if (ret != 0)
1999                 DRM_ERROR("Fence submission error. Syncing.\n");
2000
2001         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2002         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2003                                     (void *) fence);
2004
2005         if (unlikely(dev_priv->pinned_bo != NULL &&
2006                      !dev_priv->query_cid_valid))
2007                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2008
2009         vmw_clear_validations(sw_context);
2010         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2011                                     user_fence_rep, fence, handle);
2012
2013         /* Don't unreference when handing fence out */
2014         if (unlikely(out_fence != NULL)) {
2015                 *out_fence = fence;
2016                 fence = NULL;
2017         } else if (likely(fence != NULL)) {
2018                 vmw_fence_obj_unreference(&fence);
2019         }
2020
2021         list_splice_init(&sw_context->resource_list, &resource_list);
2022         mutex_unlock(&dev_priv->cmdbuf_mutex);
2023
2024         /*
2025          * Unreference resources outside of the cmdbuf_mutex to
2026          * avoid deadlocks in resource destruction paths.
2027          */
2028         vmw_resource_list_unreference(&resource_list);
2029
2030         return 0;
2031
2032 out_err:
2033         vmw_resource_relocations_free(&sw_context->res_relocations);
2034         vmw_free_relocations(sw_context);
2035         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2036         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2037         vmw_clear_validations(sw_context);
2038         if (unlikely(dev_priv->pinned_bo != NULL &&
2039                      !dev_priv->query_cid_valid))
2040                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2041 out_unlock:
2042         list_splice_init(&sw_context->resource_list, &resource_list);
2043         error_resource = sw_context->error_resource;
2044         sw_context->error_resource = NULL;
2045         mutex_unlock(&dev_priv->cmdbuf_mutex);
2046
2047         /*
2048          * Unreference resources outside of the cmdbuf_mutex to
2049          * avoid deadlocks in resource destruction paths.
2050          */
2051         vmw_resource_list_unreference(&resource_list);
2052         if (unlikely(error_resource != NULL))
2053                 vmw_resource_unreference(&error_resource);
2054
2055         return ret;
2056 }
2057
2058 /**
2059  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2060  *
2061  * @dev_priv: The device private structure.
2062  *
2063  * This function is called to idle the fifo and unpin the query buffer
2064  * if the normal way to do this hits an error, which should typically be
2065  * extremely rare.
2066  */
2067 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2068 {
2069         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2070
2071         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2072         vmw_bo_pin(dev_priv->pinned_bo, false);
2073         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2074         dev_priv->dummy_query_bo_pinned = false;
2075 }
2076
2077
2078 /**
2079  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2080  * query bo.
2081  *
2082  * @dev_priv: The device private structure.
2083  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2084  * _after_ a query barrier that flushes all queries touching the current
2085  * buffer pointed to by @dev_priv->pinned_bo
2086  *
2087  * This function should be used to unpin the pinned query bo, or
2088  * as a query barrier when we need to make sure that all queries have
2089  * finished before the next fifo command. (For example on hardware
2090  * context destructions where the hardware may otherwise leak unfinished
2091  * queries).
2092  *
2093  * This function does not return any failure codes, but makes attempts
2094  * to do safe unpinning in case of errors.
2095  *
2096  * The function will synchronize on the previous query barrier, and will
2097  * thus not finish until that barrier has executed.
2098  *
2099  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2100  * before calling this function.
2101  */
2102 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2103                                      struct vmw_fence_obj *fence)
2104 {
2105         int ret = 0;
2106         struct list_head validate_list;
2107         struct ttm_validate_buffer pinned_val, query_val;
2108         struct vmw_fence_obj *lfence = NULL;
2109         struct ww_acquire_ctx ticket;
2110
2111         if (dev_priv->pinned_bo == NULL)
2112                 goto out_unlock;
2113
2114         INIT_LIST_HEAD(&validate_list);
2115
2116         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2117         list_add_tail(&pinned_val.head, &validate_list);
2118
2119         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2120         list_add_tail(&query_val.head, &validate_list);
2121
2122         do {
2123                 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2124         } while (ret == -ERESTARTSYS);
2125
2126         if (unlikely(ret != 0)) {
2127                 vmw_execbuf_unpin_panic(dev_priv);
2128                 goto out_no_reserve;
2129         }
2130
2131         if (dev_priv->query_cid_valid) {
2132                 BUG_ON(fence != NULL);
2133                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2134                 if (unlikely(ret != 0)) {
2135                         vmw_execbuf_unpin_panic(dev_priv);
2136                         goto out_no_emit;
2137                 }
2138                 dev_priv->query_cid_valid = false;
2139         }
2140
2141         vmw_bo_pin(dev_priv->pinned_bo, false);
2142         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2143         dev_priv->dummy_query_bo_pinned = false;
2144
2145         if (fence == NULL) {
2146                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2147                                                   NULL);
2148                 fence = lfence;
2149         }
2150         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2151         if (lfence != NULL)
2152                 vmw_fence_obj_unreference(&lfence);
2153
2154         ttm_bo_unref(&query_val.bo);
2155         ttm_bo_unref(&pinned_val.bo);
2156         ttm_bo_unref(&dev_priv->pinned_bo);
2157
2158 out_unlock:
2159         return;
2160
2161 out_no_emit:
2162         ttm_eu_backoff_reservation(&ticket, &validate_list);
2163 out_no_reserve:
2164         ttm_bo_unref(&query_val.bo);
2165         ttm_bo_unref(&pinned_val.bo);
2166         ttm_bo_unref(&dev_priv->pinned_bo);
2167 }
2168
2169 /**
2170  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2171  * query bo.
2172  *
2173  * @dev_priv: The device private structure.
2174  *
2175  * This function should be used to unpin the pinned query bo, or
2176  * as a query barrier when we need to make sure that all queries have
2177  * finished before the next fifo command. (For example on hardware
2178  * context destructions where the hardware may otherwise leak unfinished
2179  * queries).
2180  *
2181  * This function does not return any failure codes, but makes attempts
2182  * to do safe unpinning in case of errors.
2183  *
2184  * The function will synchronize on the previous query barrier, and will
2185  * thus not finish until that barrier has executed.
2186  */
2187 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2188 {
2189         mutex_lock(&dev_priv->cmdbuf_mutex);
2190         if (dev_priv->query_cid_valid)
2191                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2192         mutex_unlock(&dev_priv->cmdbuf_mutex);
2193 }
2194
2195
2196 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2197                       struct drm_file *file_priv)
2198 {
2199         struct vmw_private *dev_priv = vmw_priv(dev);
2200         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2201         struct vmw_master *vmaster = vmw_master(file_priv->master);
2202         int ret;
2203
2204         /*
2205          * This will allow us to extend the ioctl argument while
2206          * maintaining backwards compatibility:
2207          * We take different code paths depending on the value of
2208          * arg->version.
2209          */
2210
2211         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2212                 DRM_ERROR("Incorrect execbuf version.\n");
2213                 DRM_ERROR("You're running outdated experimental "
2214                           "vmwgfx user-space drivers.\n");
2215                 return -EINVAL;
2216         }
2217
2218         ret = ttm_read_lock(&vmaster->lock, true);
2219         if (unlikely(ret != 0))
2220                 return ret;
2221
2222         ret = vmw_execbuf_process(file_priv, dev_priv,
2223                                   (void __user *)(unsigned long)arg->commands,
2224                                   NULL, arg->command_size, arg->throttle_us,
2225                                   (void __user *)(unsigned long)arg->fence_rep,
2226                                   NULL);
2227
2228         if (unlikely(ret != 0))
2229                 goto out_unlock;
2230
2231         vmw_kms_cursor_post_execbuf(dev_priv);
2232
2233 out_unlock:
2234         ttm_read_unlock(&vmaster->lock);
2235         return ret;
2236 }
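
/*
 * Illustrative user-space invocation (a sketch; field names taken from
 * struct drm_vmw_execbuf_arg as used above, drmCommandWrite() from
 * libdrm assumed):
 *
 *	struct drm_vmw_execbuf_arg arg;
 *	struct drm_vmw_fence_rep rep;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	rep.error = -EFAULT;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_bytes;
 *	arg.throttle_us = 0;
 *	arg.fence_rep = (unsigned long) &rep;
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */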