#include <chrono>
#include <deque>
#include <mutex>
#include <utility>

#include "flutter/fml/memory/ref_counted.h"
#include "flutter/fml/memory/weak_ptr.h"
#include "flutter/fml/task_runner.h"
#include "flutter/fml/trace_event.h"

#include "third_party/skia/include/core/SkRefCnt.h"
#include "third_party/skia/include/gpu/GrDirectContext.h"
namespace flutter {
18
19
19
20
// A queue that holds Skia objects that must be destructed on the given task
20
21
// runner.
21
- class SkiaUnrefQueue : public fml ::RefCountedThreadSafe<SkiaUnrefQueue> {
22
+ template <class T >
23
+ class UnrefQueue : public fml ::RefCountedThreadSafe<UnrefQueue<T>> {
22
24
public:
23
- void Unref (SkRefCnt* object);
25
+ using ResourceContext = T;
26
+
27
+ void Unref (SkRefCnt* object) {
28
+ std::scoped_lock lock (mutex_);
29
+ objects_.push_back (object);
30
+ if (!drain_pending_) {
31
+ drain_pending_ = true ;
32
+ task_runner_->PostDelayedTask (
33
+ [strong = fml::Ref (this )]() { strong->Drain (); }, drain_delay_);
34
+ }
35
+ }
24
36
25
37
// Usually, the drain is called automatically. However, during IO manager
26
38
// shutdown (when the platform side reference to the OpenGL context is about
27
39
// to go away), we may need to pre-emptively drain the unref queue. It is the
28
40
// responsibility of the caller to ensure that no further unrefs are queued
29
41
// after this call.
30
- void Drain ();
42
+ void Drain () {
43
+ TRACE_EVENT0 (" flutter" , " SkiaUnrefQueue::Drain" );
44
+ std::deque<SkRefCnt*> skia_objects;
45
+ {
46
+ std::scoped_lock lock (mutex_);
47
+ objects_.swap (skia_objects);
48
+ drain_pending_ = false ;
49
+ }
50
+ DoDrain (skia_objects, context_);
51
+ }
31
52
32
- void UpdateResourceContext (sk_sp<GrDirectContext > context) {
53
+ void UpdateResourceContext (sk_sp<ResourceContext > context) {
33
54
context_ = context;
34
55
}
35
56
@@ -39,25 +60,47 @@ class SkiaUnrefQueue : public fml::RefCountedThreadSafe<SkiaUnrefQueue> {
39
60
std::mutex mutex_;
40
61
std::deque<SkRefCnt*> objects_;
41
62
bool drain_pending_;
42
- sk_sp<GrDirectContext > context_;
63
+ sk_sp<ResourceContext > context_;
43
64
44
65
// The `GrDirectContext* context` is only used for signaling Skia to
45
66
// performDeferredCleanup. It can be nullptr when such signaling is not needed
46
67
// (e.g., in unit tests).
47
- SkiaUnrefQueue (fml::RefPtr<fml::TaskRunner> task_runner,
48
- fml::TimeDelta delay,
49
- sk_sp<GrDirectContext> context = nullptr );
50
-
51
- ~SkiaUnrefQueue ();
68
+ UnrefQueue (fml::RefPtr<fml::TaskRunner> task_runner,
69
+ fml::TimeDelta delay,
70
+ sk_sp<ResourceContext> context = nullptr )
71
+ : task_runner_(std::move(task_runner)),
72
+ drain_delay_ (delay),
73
+ drain_pending_(false ),
74
+ context_(context) {}
75
+
76
+ ~UnrefQueue () {
77
+ fml::TaskRunner::RunNowOrPostTask (
78
+ task_runner_, [objects = std::move (objects_),
79
+ context = std::move (context_)]() mutable {
80
+ DoDrain (objects, context);
81
+ context.reset ();
82
+ });
83
+ }
52
84
85
+ // static
53
86
static void DoDrain (const std::deque<SkRefCnt*>& skia_objects,
54
- sk_sp<GrDirectContext> context);
87
+ sk_sp<ResourceContext> context) {
88
+ for (SkRefCnt* skia_object : skia_objects) {
89
+ skia_object->unref ();
90
+ }
91
+
92
+ if (context && skia_objects.size () > 0 ) {
93
+ context->performDeferredCleanup (std::chrono::milliseconds (0 ));
94
+ }
95
+ }
55
96
56
- FML_FRIEND_REF_COUNTED_THREAD_SAFE (SkiaUnrefQueue );
57
- FML_FRIEND_MAKE_REF_COUNTED (SkiaUnrefQueue );
58
- FML_DISALLOW_COPY_AND_ASSIGN (SkiaUnrefQueue );
97
+ FML_FRIEND_REF_COUNTED_THREAD_SAFE (UnrefQueue );
98
+ FML_FRIEND_MAKE_REF_COUNTED (UnrefQueue );
99
+ FML_DISALLOW_COPY_AND_ASSIGN (UnrefQueue );
59
100
};
60
101
102
+ using SkiaUnrefQueue = UnrefQueue<GrDirectContext>;
103
+
/// An object whose deallocation needs to be performed on a specific unref
/// queue. The template argument U needs to have a call operator that returns
/// that unref queue.