]> Pileus Git - ~andy/linux/blob - drivers/staging/android/sync.c
f84caad35d6809b27211f8f1fa8eec688309b230
[~andy/linux] / drivers / staging / android / sync.c
1 /*
 * drivers/staging/android/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
26
27 #include "sync.h"
28
/* forward declarations for helpers shared between fences and timelines */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

/* every live timeline, for the debugfs dump; guarded by the lock below */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* every live fence, for the debugfs dump; guarded by the lock below */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
37
38 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
39                                            int size, const char *name)
40 {
41         struct sync_timeline *obj;
42         unsigned long flags;
43
44         if (size < sizeof(struct sync_timeline))
45                 return NULL;
46
47         obj = kzalloc(size, GFP_KERNEL);
48         if (obj == NULL)
49                 return NULL;
50
51         obj->ops = ops;
52         strlcpy(obj->name, name, sizeof(obj->name));
53
54         INIT_LIST_HEAD(&obj->child_list_head);
55         spin_lock_init(&obj->child_list_lock);
56
57         INIT_LIST_HEAD(&obj->active_list_head);
58         spin_lock_init(&obj->active_list_lock);
59
60         spin_lock_irqsave(&sync_timeline_list_lock, flags);
61         list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
62         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
63
64         return obj;
65 }
66
67 static void sync_timeline_free(struct sync_timeline *obj)
68 {
69         unsigned long flags;
70
71         if (obj->ops->release_obj)
72                 obj->ops->release_obj(obj);
73
74         spin_lock_irqsave(&sync_timeline_list_lock, flags);
75         list_del(&obj->sync_timeline_list);
76         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
77
78         kfree(obj);
79 }
80
/*
 * sync_timeline_destroy() - driver-side destruction of a timeline
 *
 * If no sync_pts remain the timeline is freed immediately; otherwise it is
 * only marked destroyed and the last sync_pt_free() will free it (see
 * sync_timeline_remove_pt()).
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	/*
	 * Set ->destroyed and sample the child list under the child lock so
	 * the free-vs-defer decision is consistent with concurrent
	 * add/remove of children.
	 */
	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
	else
		/*
		 * Children remain: signal so active pts observe ->destroyed
		 * in _sync_pt_has_signaled() and error out with -ENOENT.
		 */
		sync_timeline_signal(obj);
}
96
97 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
98 {
99         unsigned long flags;
100
101         pt->parent = obj;
102
103         spin_lock_irqsave(&obj->child_list_lock, flags);
104         list_add_tail(&pt->child_list, &obj->child_list_head);
105         spin_unlock_irqrestore(&obj->child_list_lock, flags);
106 }
107
108 static void sync_timeline_remove_pt(struct sync_pt *pt)
109 {
110         struct sync_timeline *obj = pt->parent;
111         unsigned long flags;
112         bool needs_freeing;
113
114         spin_lock_irqsave(&obj->active_list_lock, flags);
115         if (!list_empty(&pt->active_list))
116                 list_del_init(&pt->active_list);
117         spin_unlock_irqrestore(&obj->active_list_lock, flags);
118
119         spin_lock_irqsave(&obj->child_list_lock, flags);
120         list_del(&pt->child_list);
121         needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
122         spin_unlock_irqrestore(&obj->child_list_lock, flags);
123
124         if (needs_freeing)
125                 sync_timeline_free(obj);
126 }
127
/*
 * sync_timeline_signal() - re-evaluate every active pt on @obj
 *
 * Called by drivers after advancing the timeline.  Pts that now signal are
 * collected onto a private list while holding the active-list lock, then
 * fence notification runs outside that lock (sync_fence_signal_pt() takes
 * the fence's waiter lock and invokes arbitrary callbacks).
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	/* outside the lock: tell each owning fence about its signaled pt */
	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		/* re-init so sync_timeline_remove_pt() sees the pt off-list */
		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
154
155 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
156 {
157         struct sync_pt *pt;
158
159         if (size < sizeof(struct sync_pt))
160                 return NULL;
161
162         pt = kzalloc(size, GFP_KERNEL);
163         if (pt == NULL)
164                 return NULL;
165
166         INIT_LIST_HEAD(&pt->active_list);
167         sync_timeline_add_pt(parent, pt);
168
169         return pt;
170 }
171
172 void sync_pt_free(struct sync_pt *pt)
173 {
174         if (pt->parent->ops->free_pt)
175                 pt->parent->ops->free_pt(pt);
176
177         sync_timeline_remove_pt(pt);
178
179         kfree(pt);
180 }
181
/*
 * Evaluate and cache @pt's status: >0 signaled, 0 active, <0 error.
 * An unsignaled pt on a destroyed timeline is forced to -ENOENT.  The
 * signal timestamp is recorded on the 0 -> nonzero transition.
 *
 * call with pt->parent->active_list_lock held
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
198
199 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
200 {
201         return pt->parent->ops->dup(pt);
202 }
203
204 /* Adds a sync pt to the active queue.  Called when added to a fence */
205 static void sync_pt_activate(struct sync_pt *pt)
206 {
207         struct sync_timeline *obj = pt->parent;
208         unsigned long flags;
209         int err;
210
211         spin_lock_irqsave(&obj->active_list_lock, flags);
212
213         err = _sync_pt_has_signaled(pt);
214         if (err != 0)
215                 goto out;
216
217         list_add_tail(&pt->active_list, &obj->active_list_head);
218
219 out:
220         spin_unlock_irqrestore(&obj->active_list_lock, flags);
221 }
222
static int sync_fence_release(struct inode *inode, struct file *file);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


/* file_operations backing the anonymous "sync_fence" fd given to userspace */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.unlocked_ioctl = sync_fence_ioctl,
};
232
233 static struct sync_fence *sync_fence_alloc(const char *name)
234 {
235         struct sync_fence *fence;
236         unsigned long flags;
237
238         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
239         if (fence == NULL)
240                 return NULL;
241
242         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
243                                          fence, 0);
244         if (fence->file == NULL)
245                 goto err;
246
247         strlcpy(fence->name, name, sizeof(fence->name));
248
249         INIT_LIST_HEAD(&fence->pt_list_head);
250         INIT_LIST_HEAD(&fence->waiter_list_head);
251         spin_lock_init(&fence->waiter_list_lock);
252
253         init_waitqueue_head(&fence->wq);
254
255         spin_lock_irqsave(&sync_fence_list_lock, flags);
256         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
257         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
258
259         return fence;
260
261 err:
262         kfree(fence);
263         return NULL;
264 }
265
266 /* TODO: implement a create which takes more that one sync_pt */
267 struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
268 {
269         struct sync_fence *fence;
270
271         if (pt->fence)
272                 return NULL;
273
274         fence = sync_fence_alloc(name);
275         if (fence == NULL)
276                 return NULL;
277
278         pt->fence = fence;
279         list_add(&pt->pt_list, &fence->pt_list_head);
280         sync_pt_activate(pt);
281
282         return fence;
283 }
284
285 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
286 {
287         struct list_head *pos;
288
289         list_for_each(pos, &src->pt_list_head) {
290                 struct sync_pt *orig_pt =
291                         container_of(pos, struct sync_pt, pt_list);
292                 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
293
294                 if (new_pt == NULL)
295                         return -ENOMEM;
296
297                 new_pt->fence = dst;
298                 list_add(&new_pt->pt_list, &dst->pt_list_head);
299                 sync_pt_activate(new_pt);
300         }
301
302         return 0;
303 }
304
305 static void sync_fence_free_pts(struct sync_fence *fence)
306 {
307         struct list_head *pos, *n;
308
309         list_for_each_safe(pos, n, &fence->pt_list_head) {
310                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
311                 sync_pt_free(pt);
312         }
313 }
314
315 struct sync_fence *sync_fence_fdget(int fd)
316 {
317         struct file *file = fget(fd);
318
319         if (file == NULL)
320                 return NULL;
321
322         if (file->f_op != &sync_fence_fops)
323                 goto err;
324
325         return file->private_data;
326
327 err:
328         fput(file);
329         return NULL;
330 }
331
/* Drop a reference on @fence's file; the final fput releases the fence. */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
336
/* Publish @fence to userspace by binding its file to the reserved @fd. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
341
342 static int sync_fence_get_status(struct sync_fence *fence)
343 {
344         struct list_head *pos;
345         int status = 1;
346
347         list_for_each(pos, &fence->pt_list_head) {
348                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
349                 int pt_status = pt->status;
350
351                 if (pt_status < 0) {
352                         status = pt_status;
353                         break;
354                 } else if (status == 1) {
355                         status = pt_status;
356                 }
357         }
358
359         return status;
360 }
361
362 struct sync_fence *sync_fence_merge(const char *name,
363                                     struct sync_fence *a, struct sync_fence *b)
364 {
365         struct sync_fence *fence;
366         int err;
367
368         fence = sync_fence_alloc(name);
369         if (fence == NULL)
370                 return NULL;
371
372         err = sync_fence_copy_pts(fence, a);
373         if (err < 0)
374                 goto err;
375
376         err = sync_fence_copy_pts(fence, b);
377         if (err < 0)
378                 goto err;
379
380         fence->status = sync_fence_get_status(fence);
381
382         return fence;
383 err:
384         sync_fence_free_pts(fence);
385         kfree(fence);
386         return NULL;
387 }
388
/*
 * Called when @pt signals: recompute the owning fence's aggregate status
 * and, on the fence's first transition to signaled (or error), run and free
 * all queued async waiters and wake sleepers in sync_fence_wait().
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* claim the waiters while holding the lock ... */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* lost the race, or no transition happened: nothing to do */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* ... and invoke their callbacks outside of it */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			waiter->callback(fence, waiter->callback_data);
			list_del(pos);
			kfree(waiter);
		}
		wake_up(&fence->wq);
	}
}
428
429 int sync_fence_wait_async(struct sync_fence *fence,
430                           void (*callback)(struct sync_fence *, void *data),
431                           void *callback_data)
432 {
433         struct sync_fence_waiter *waiter;
434         unsigned long flags;
435         int err = 0;
436
437         waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
438         if (waiter == NULL)
439                 return -ENOMEM;
440
441         waiter->callback = callback;
442         waiter->callback_data = callback_data;
443
444         spin_lock_irqsave(&fence->waiter_list_lock, flags);
445
446         if (fence->status) {
447                 kfree(waiter);
448                 err = fence->status;
449                 goto out;
450         }
451
452         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
453 out:
454         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
455
456         return err;
457 }
458
459 int sync_fence_wait(struct sync_fence *fence, long timeout)
460 {
461         int err;
462
463         if (timeout) {
464                 timeout = msecs_to_jiffies(timeout);
465                 err = wait_event_interruptible_timeout(fence->wq,
466                                                        fence->status != 0,
467                                                        timeout);
468         } else {
469                 err = wait_event_interruptible(fence->wq, fence->status != 0);
470         }
471
472         if (err < 0)
473                 return err;
474
475         if (fence->status < 0)
476                 return fence->status;
477
478         if (fence->status == 0)
479                 return -ETIME;
480
481         return 0;
482 }
483
/*
 * Final release of the anon_inode file backing a fence: free the pts
 * (which may also free destroyed parent timelines), unlink the fence from
 * the global debugfs list and free it.
 */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	sync_fence_free_pts(fence);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	kfree(fence);

	return 0;
}
499
500 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
501 {
502         __s32 value;
503
504         if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
505                 return -EFAULT;
506
507         return sync_fence_wait(fence, value);
508 }
509
510 static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
511 {
512         int fd = get_unused_fd();
513         int err;
514         struct sync_fence *fence2, *fence3;
515         struct sync_merge_data data;
516
517         if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
518                 return -EFAULT;
519
520         fence2 = sync_fence_fdget(data.fd2);
521         if (fence2 == NULL) {
522                 err = -ENOENT;
523                 goto err_put_fd;
524         }
525
526         data.name[sizeof(data.name) - 1] = '\0';
527         fence3 = sync_fence_merge(data.name, fence, fence2);
528         if (fence3 == NULL) {
529                 err = -ENOMEM;
530                 goto err_put_fence2;
531         }
532
533         data.fence = fd;
534         if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
535                 err = -EFAULT;
536                 goto err_put_fence3;
537         }
538
539         sync_fence_install(fence3, fd);
540         sync_fence_put(fence2);
541         return 0;
542
543 err_put_fence3:
544         sync_fence_put(fence3);
545
546 err_put_fence2:
547         sync_fence_put(fence2);
548
549 err_put_fd:
550         put_unused_fd(fd);
551         return err;
552 }
553
554 static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
555 {
556         struct sync_pt_info *info = data;
557         int ret;
558
559         if (size < sizeof(struct sync_pt_info))
560                 return -ENOMEM;
561
562         info->len = sizeof(struct sync_pt_info);
563
564         if (pt->parent->ops->fill_driver_data) {
565                 ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
566                                                         size - sizeof(*info));
567                 if (ret < 0)
568                         return ret;
569
570                 info->len += ret;
571         }
572
573         strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
574         strlcpy(info->driver_name, pt->parent->ops->driver_name,
575                 sizeof(info->driver_name));
576         info->status = pt->status;
577         info->timestamp_ns = ktime_to_ns(pt->timestamp);
578
579         return info->len;
580 }
581
582 static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
583                                         unsigned long arg)
584 {
585         struct sync_fence_info_data *data;
586         struct list_head *pos;
587         __u32 size;
588         __u32 len = 0;
589         int ret;
590
591         if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
592                 return -EFAULT;
593
594         if (size < sizeof(struct sync_fence_info_data))
595                 return -EINVAL;
596
597         if (size > 4096)
598                 size = 4096;
599
600         data = kzalloc(size, GFP_KERNEL);
601         if (data == NULL)
602                 return -ENOMEM;
603
604         strlcpy(data->name, fence->name, sizeof(data->name));
605         data->status = fence->status;
606         len = sizeof(struct sync_fence_info_data);
607
608         list_for_each(pos, &fence->pt_list_head) {
609                 struct sync_pt *pt =
610                         container_of(pos, struct sync_pt, pt_list);
611
612                 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
613
614                 if (ret < 0)
615                         goto out;
616
617                 len += ret;
618         }
619
620         data->len = len;
621
622         if (copy_to_user((void __user *)arg, data, len))
623                 ret = -EFAULT;
624         else
625                 ret = 0;
626
627 out:
628         kfree(data);
629
630         return ret;
631 }
632
633 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
634                              unsigned long arg)
635 {
636         struct sync_fence *fence = file->private_data;
637         switch (cmd) {
638         case SYNC_IOC_WAIT:
639                 return sync_fence_ioctl_wait(fence, arg);
640
641         case SYNC_IOC_MERGE:
642                 return sync_fence_ioctl_merge(fence, arg);
643
644         case SYNC_IOC_FENCE_INFO:
645                 return sync_fence_ioctl_fence_info(fence, arg);
646
647         default:
648                 return -ENOTTY;
649         }
650 }
651
652 #ifdef CONFIG_DEBUG_FS
/* Map a pt/fence status to a human-readable word for debugfs output. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status == 0)
		return "active";
	return "error";
}
662
663 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
664 {
665         int status = pt->status;
666         seq_printf(s, "  %s%spt %s",
667                    fence ? pt->parent->name : "",
668                    fence ? "_" : "",
669                    sync_status_str(status));
670         if (pt->status) {
671                 struct timeval tv = ktime_to_timeval(pt->timestamp);
672                 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
673         }
674
675         if (pt->parent->ops->print_pt) {
676                 seq_printf(s, ": ");
677                 pt->parent->ops->print_pt(s, pt);
678         }
679
680         seq_printf(s, "\n");
681 }
682
683 static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
684 {
685         struct list_head *pos;
686         unsigned long flags;
687
688         seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
689
690         if (obj->ops->print_obj) {
691                 seq_printf(s, ": ");
692                 obj->ops->print_obj(s, obj);
693         }
694
695         seq_printf(s, "\n");
696
697         spin_lock_irqsave(&obj->child_list_lock, flags);
698         list_for_each(pos, &obj->child_list_head) {
699                 struct sync_pt *pt =
700                         container_of(pos, struct sync_pt, child_list);
701                 sync_print_pt(s, pt, false);
702         }
703         spin_unlock_irqrestore(&obj->child_list_lock, flags);
704 }
705
706 static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
707 {
708         struct list_head *pos;
709         unsigned long flags;
710
711         seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
712
713         list_for_each(pos, &fence->pt_list_head) {
714                 struct sync_pt *pt =
715                         container_of(pos, struct sync_pt, pt_list);
716                 sync_print_pt(s, pt, true);
717         }
718
719         spin_lock_irqsave(&fence->waiter_list_lock, flags);
720         list_for_each(pos, &fence->waiter_list_head) {
721                 struct sync_fence_waiter *waiter =
722                         container_of(pos, struct sync_fence_waiter,
723                                      waiter_list);
724
725                 seq_printf(s, "waiter %pF %p\n", waiter->callback,
726                            waiter->callback_data);
727         }
728         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
729 }
730
731 static int sync_debugfs_show(struct seq_file *s, void *unused)
732 {
733         unsigned long flags;
734         struct list_head *pos;
735
736         seq_printf(s, "objs:\n--------------\n");
737
738         spin_lock_irqsave(&sync_timeline_list_lock, flags);
739         list_for_each(pos, &sync_timeline_list_head) {
740                 struct sync_timeline *obj =
741                         container_of(pos, struct sync_timeline,
742                                      sync_timeline_list);
743
744                 sync_print_obj(s, obj);
745                 seq_printf(s, "\n");
746         }
747         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
748
749         seq_printf(s, "fences:\n--------------\n");
750
751         spin_lock_irqsave(&sync_fence_list_lock, flags);
752         list_for_each(pos, &sync_fence_list_head) {
753                 struct sync_fence *fence =
754                         container_of(pos, struct sync_fence, sync_fence_list);
755
756                 sync_print_fence(s, fence);
757                 seq_printf(s, "\n");
758         }
759         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
760         return 0;
761 }
762
/* debugfs open: bind sync_debugfs_show through the single_open seq helper */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
767
/* read-only seq_file interface for /sys/kernel/debug/sync */
static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

/* create the "sync" debugfs entry; creation failure is deliberately ignored */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

/* late_initcall: debugfs must already be registered by this point */
late_initcall(sync_debugfs_init);
782
783 #endif