[vlc-commits] qt: medialib: make cache load() asynchronous
Romain Vimont
git at videolan.org
Wed Dec 2 17:32:25 UTC 2020
vlc | branch: master | Romain Vimont <rom1v at videolabs.io> | Thu Nov 19 16:00:55 2020 +0100 | [e5be287fcab83aff14cd825bfd3ba91ca4cdbc2a] | committer: Pierre Lamot
qt: medialib: make cache load() asynchronous
Make the list cache use async tasks to load the data asynchronously (and
notify data changes once available).
Signed-off-by: Pierre Lamot <pierre at videolabs.io>
> http://git.videolan.org/gitweb.cgi/vlc.git/?a=commit;h=e5be287fcab83aff14cd825bfd3ba91ca4cdbc2a
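In practice the user-visible change is that refer() no longer blocks: it only schedules a LoadTask on the media library thread pool, and the loaded items are announced later through localDataChanged. Below is a minimal consumer sketch, not part of the patch: the Item type, DummyLoader and demo() are hypothetical, and the ListCacheLoader override signatures are approximated from the count()/load() description in listcache.hpp; only ListCache, refer(), get() and localDataChanged come from the actual code.

#include <QThreadPool>
#include <QString>
#include <QDebug>
#include <memory>
#include <vector>
#include "listcache.hpp"

/* Hypothetical item type and loader. */
struct Item { QString name; };
using ItemPtr = std::unique_ptr<Item>;

struct DummyLoader : public ListCacheLoader<ItemPtr>
{
    size_t count() const override { return 1000; }

    std::vector<ItemPtr> load(size_t index, size_t count) const override
    {
        /* May block (a database query, for instance): with this patch it
         * runs on a thread pool thread, never on the UI thread. */
        std::vector<ItemPtr> items;
        for (size_t i = 0; i < count; ++i)
            items.push_back(std::make_unique<Item>(
                Item{ QString("item %1").arg(index + i) }));
        return items;
    }
};

void demo(QThreadPool &pool)
{
    /* Ownership/parenting omitted for brevity. */
    auto *cache = new ListCache<ItemPtr>(pool, new DummyLoader);

    QObject::connect(cache, &BaseListCache::localDataChanged,
                     [cache](size_t index, size_t count) {
        /* Delivered on the UI thread once the LoadTask has completed;
         * get() now returns non-null entries for the loaded range. */
        for (size_t i = index; i < index + count; ++i)
            if (const ItemPtr *item = cache->get(i))
                qDebug() << (*item)->name;
    });

    /* Returns immediately (assuming the total count is already known);
     * the actual load(offset, count) runs on the thread pool. */
    cache->refer(42);
}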
---
modules/gui/qt/medialibrary/mlbasemodel.hpp | 3 +-
modules/gui/qt/util/listcache.hpp | 125 ++++++++++++++++++++++++++--
2 files changed, 118 insertions(+), 10 deletions(-)
diff --git a/modules/gui/qt/medialibrary/mlbasemodel.hpp b/modules/gui/qt/medialibrary/mlbasemodel.hpp
index 39a67cdd46..22ae5e6300 100644
--- a/modules/gui/qt/medialibrary/mlbasemodel.hpp
+++ b/modules/gui/qt/medialibrary/mlbasemodel.hpp
@@ -219,8 +219,9 @@ protected:
if (m_cache)
return;
+ auto &threadPool = m_mediaLib->threadPool();
auto loader = createLoader();
- m_cache.reset(new ListCache<std::unique_ptr<T>>(loader));
+ m_cache.reset(new ListCache<std::unique_ptr<T>>(threadPool, loader));
connect(&*m_cache, &BaseListCache::localDataChanged,
this, &MLSlidingWindowModel<T>::onLocalDataChanged);
diff --git a/modules/gui/qt/util/listcache.hpp b/modules/gui/qt/util/listcache.hpp
index c762e84ede..fb9cfb5814 100644
--- a/modules/gui/qt/util/listcache.hpp
+++ b/modules/gui/qt/util/listcache.hpp
@@ -24,10 +24,13 @@
#endif
#include "vlc_common.h"
+#include <cassert>
#include <memory>
#include <vector>
#include <QtGlobal>
#include <QObject>
+#include <QSharedPointer>
+#include "asynctask.hpp"
/**
* `ListCache<T>` represents a cache for a (constant) list of items.
@@ -37,6 +40,9 @@
* - `count()` returns the number of items in the list;
* - `load(index, count)` returns the items for the requested interval.
*
+ * These functions are assumed to be long-running, so they are executed from a
+ * separate thread, so as not to block the UI thread.
+ *
* The precise cache strategy is unspecified (it may change in the future), but
* the general principle is to keep locally only a part of the whole data.
*
@@ -67,6 +73,34 @@ class BaseListCache : public QObject
signals:
void localDataChanged(size_t index, size_t count);
+
+protected slots:
+ virtual void onLoadResult() = 0;
+};
+
+template <typename T>
+class LoadTask;
+
+struct MLRange
+{
+ size_t offset = 0;
+ size_t count = 0;
+
+ MLRange() = default;
+
+ MLRange(size_t offset, size_t count)
+ : offset(offset)
+ , count(count)
+ {
+ }
+
+ bool isEmpty() {
+ return count == 0;
+ }
+
+ bool contains(size_t index) {
+ return index >= offset && index < offset + count;
+ }
};
template <typename T>
@@ -75,9 +109,12 @@ class ListCache : public BaseListCache
public:
static constexpr ssize_t COUNT_UNINITIALIZED = -1;
- ListCache(ListCacheLoader<T> *loader, size_t chunkSize = 100)
- : m_loader(loader)
+ ListCache(QThreadPool &threadPool, ListCacheLoader<T> *loader,
+ size_t chunkSize = 100)
+ : m_threadPool(threadPool)
+ , m_loader(loader)
, m_chunkSize(chunkSize) {}
+ ~ListCache();
/**
* Return the item at specified index
@@ -110,14 +147,32 @@ public:
void refer(size_t index);
private:
- std::unique_ptr<ListCacheLoader<T>> m_loader;
+ void onLoadResult() override;
+
+ void asyncLoad(size_t offset, size_t count);
+
+ QThreadPool &m_threadPool;
+ /* Ownership shared between this cache and the runnable spawned to execute
+ * loader callbacks */
+ QSharedPointer<ListCacheLoader<T>> m_loader;
size_t m_chunkSize;
std::vector<T> m_list;
ssize_t m_total_count = COUNT_UNINITIALIZED;
size_t m_offset = 0;
+
+ MLRange m_lastRangeRequested;
+
+ LoadTask<T> *m_loadTask = nullptr;
};
+template <typename T>
+ListCache<T>::~ListCache()
+{
+ if (m_loadTask)
+ m_loadTask->abandon();
+}
+
template <typename T>
const T *ListCache<T>::get(size_t index) const
{
@@ -156,16 +211,68 @@ void ListCache<T>::refer(size_t index)
}
/* index outside the known portion of the list */
- if (index < m_offset || index >= m_offset + m_list.size())
+ if (!m_lastRangeRequested.contains(index))
{
/* FIXME bad heuristic if the interval of visible items crosses a cache
* page boundary */
- m_offset = index - index % m_chunkSize;
- size_t count = qMin(m_total_count - m_offset, m_chunkSize);
- m_list = m_loader->load(m_offset, count);
- if (m_list.size())
- emit localDataChanged(m_offset, m_list.size());
+ size_t offset = index - index % m_chunkSize;
+ size_t count = qMin(m_total_count - offset, m_chunkSize);
+ asyncLoad(offset, count);
}
}
+template <typename T>
+class LoadTask : public AsyncTask<std::vector<T>>
+{
+public:
+ LoadTask(QSharedPointer<ListCacheLoader<T>> loader, size_t offset,
+ size_t count)
+ : m_loader(loader)
+ , m_offset(offset)
+ , m_count(count)
+ {
+ }
+
+ std::vector<T> execute() override
+ {
+ return m_loader->load(m_offset, m_count);
+ }
+
+private:
+ QSharedPointer<ListCacheLoader<T>> m_loader;
+ size_t m_offset;
+ size_t m_count;
+
+ friend class ListCache<T>;
+};
+
+template <typename T>
+void ListCache<T>::asyncLoad(size_t offset, size_t count)
+{
+ if (m_loadTask)
+ /* Cancel any current pending task */
+ m_loadTask->abandon();
+
+ m_loadTask = new LoadTask<T>(m_loader, offset, count);
+ connect(m_loadTask, &BaseAsyncTask::result,
+ this, &ListCache<T>::onLoadResult);
+ m_lastRangeRequested = { offset, count };
+ m_loadTask->start(m_threadPool);
+}
+
+template <typename T>
+void ListCache<T>::onLoadResult()
+{
+ LoadTask<T> *task = static_cast<LoadTask<T> *>(sender());
+ assert(task == m_loadTask);
+
+ m_offset = task->m_offset;
+ m_list = task->takeResult();
+ if (m_list.size())
+ emit localDataChanged(m_offset, m_list.size());
+
+ task->abandon();
+ m_loadTask = nullptr;
+}
+
#endif
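The patch builds on asynctask.hpp, which is not part of this diff. Purely for orientation, the contract the cache relies on can be summarized as below; this is an inferred sketch based only on the calls visible above (execute(), start(), takeResult(), abandon() and the result signal), and the real header may differ.

/* Inferred sketch, not the actual asynctask.hpp. */
class BaseAsyncTask : public QObject
{
    Q_OBJECT
signals:
    /* Emitted back on the owner's thread once execute() has returned;
     * sender() identifies the finished task. */
    void result();
};

template <typename R>
class AsyncTask : public BaseAsyncTask
{
public:
    /* Runs on a QThreadPool worker thread; must not touch UI state. */
    virtual R execute() = 0;

    /* Schedules execute() on the given pool and returns immediately. */
    void start(QThreadPool &pool);

    /* Retrieves the value produced by execute(). */
    R takeResult();

    /* Detaches the task from its owner: any pending result is dropped and the
     * task cleans itself up, so the owner can be destroyed (as in ~ListCache())
     * or supersede the request (as in asyncLoad()) while a load is in flight. */
    void abandon();
};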